From b3b165b49923aa728d6aaa06f8e3d17ede9aed6b Mon Sep 17 00:00:00 2001 From: Justin Aquadro Date: Sat, 26 Feb 2011 09:33:06 +0000 Subject: [PATCH] Port to C# --- NBToolkit/NBToolkit/Harness.cs | 120 + NBToolkit/NBToolkit/NBT.cs | 736 +++++++ NBToolkit/NBToolkit/NBToolkit.csproj | 131 ++ NBToolkit/NBToolkit/NBToolkit.csproj.user | 24 + NBToolkit/NBToolkit/NBToolkit.sln | 20 + NBToolkit/NBToolkit/NBToolkit.suo | Bin 0 -> 38912 bytes NBToolkit/NBToolkit/NDesk/Options.cs | 1103 ++++++++++ NBToolkit/NBToolkit/Oregen.cs | 320 +++ NBToolkit/NBToolkit/Program.cs | 110 + .../NBToolkit/Properties/AssemblyInfo.cs | 36 + NBToolkit/NBToolkit/RegionFile.cs | 376 ++++ NBToolkit/NBToolkit/RegionFileCache.cs | 97 + NBToolkit/NBToolkit/Replace.cs | 141 ++ NBToolkit/NBToolkit/TKFilter.cs | 13 + NBToolkit/NBToolkit/TKOptions.cs | 105 + NBToolkit/NBToolkit/Zlib/Crc32.cs | 572 +++++ NBToolkit/NBToolkit/Zlib/Deflate.cs | 1927 +++++++++++++++++ NBToolkit/NBToolkit/Zlib/DeflateStream.cs | 705 ++++++ NBToolkit/NBToolkit/Zlib/GZipStream.cs | 1013 +++++++++ NBToolkit/NBToolkit/Zlib/InfTree.cs | 436 ++++ NBToolkit/NBToolkit/Zlib/Inflate.cs | 1796 +++++++++++++++ NBToolkit/NBToolkit/Zlib/LICENSE.txt | 29 + .../Zlib/ParallelDeflateOutputStream.cs | 1158 ++++++++++ NBToolkit/NBToolkit/Zlib/Tree.cs | 423 ++++ NBToolkit/NBToolkit/Zlib/Zlib.cs | 456 ++++ NBToolkit/NBToolkit/Zlib/ZlibBaseStream.cs | 627 ++++++ NBToolkit/NBToolkit/Zlib/ZlibCodec.cs | 717 ++++++ NBToolkit/NBToolkit/Zlib/ZlibConstants.cs | 128 ++ NBToolkit/NBToolkit/Zlib/ZlibStream.cs | 682 ++++++ NBToolkit/chunk.h | 26 + NBToolkit/main.c | 22 + 31 files changed, 14049 insertions(+) create mode 100644 NBToolkit/NBToolkit/Harness.cs create mode 100644 NBToolkit/NBToolkit/NBT.cs create mode 100644 NBToolkit/NBToolkit/NBToolkit.csproj create mode 100644 NBToolkit/NBToolkit/NBToolkit.csproj.user create mode 100644 NBToolkit/NBToolkit/NBToolkit.sln create mode 100644 NBToolkit/NBToolkit/NBToolkit.suo create mode 100644 NBToolkit/NBToolkit/NDesk/Options.cs create mode 100644 NBToolkit/NBToolkit/Oregen.cs create mode 100644 NBToolkit/NBToolkit/Program.cs create mode 100644 NBToolkit/NBToolkit/Properties/AssemblyInfo.cs create mode 100644 NBToolkit/NBToolkit/RegionFile.cs create mode 100644 NBToolkit/NBToolkit/RegionFileCache.cs create mode 100644 NBToolkit/NBToolkit/Replace.cs create mode 100644 NBToolkit/NBToolkit/TKFilter.cs create mode 100644 NBToolkit/NBToolkit/TKOptions.cs create mode 100644 NBToolkit/NBToolkit/Zlib/Crc32.cs create mode 100644 NBToolkit/NBToolkit/Zlib/Deflate.cs create mode 100644 NBToolkit/NBToolkit/Zlib/DeflateStream.cs create mode 100644 NBToolkit/NBToolkit/Zlib/GZipStream.cs create mode 100644 NBToolkit/NBToolkit/Zlib/InfTree.cs create mode 100644 NBToolkit/NBToolkit/Zlib/Inflate.cs create mode 100644 NBToolkit/NBToolkit/Zlib/LICENSE.txt create mode 100644 NBToolkit/NBToolkit/Zlib/ParallelDeflateOutputStream.cs create mode 100644 NBToolkit/NBToolkit/Zlib/Tree.cs create mode 100644 NBToolkit/NBToolkit/Zlib/Zlib.cs create mode 100644 NBToolkit/NBToolkit/Zlib/ZlibBaseStream.cs create mode 100644 NBToolkit/NBToolkit/Zlib/ZlibCodec.cs create mode 100644 NBToolkit/NBToolkit/Zlib/ZlibConstants.cs create mode 100644 NBToolkit/NBToolkit/Zlib/ZlibStream.cs diff --git a/NBToolkit/NBToolkit/Harness.cs b/NBToolkit/NBToolkit/Harness.cs new file mode 100644 index 0000000..7a7f3a7 --- /dev/null +++ b/NBToolkit/NBToolkit/Harness.cs @@ -0,0 +1,120 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.IO; 
+using NBT; + +namespace NBToolkit +{ + class Harness + { + public void Run (TKOptions opt, TKFilter filter) { + string[] regions = RegionFileCache.GetRegionFileList(opt.OPT_WORLD); + + foreach (string p in regions) { + Console.WriteLine(p); + RegionFile rfile = RegionFileCache.GetRegionFile(opt.OPT_WORLD, p); + + for (int x = 0; x < 32; x++) { + for (int z = 0; z < 32; z++) { + NBT_Tree tree = new NBT_Tree(rfile.GetChunkDataInputStream(x, z)); + + // Check that tree exists + if (tree == null || tree.getRoot() == null) { + continue; + } + + NBT_Tag level = tree.getRoot().findTagByName("Level"); + if (level == null) { + continue; + } + + NBT_Tag tagcx = level.findTagByName("xPos"); + NBT_Tag tagcz = level.findTagByName("zPos"); + + if (tagcx == null || tagcz == null) { + continue; + } + + int cx = tagcx.value.toInt().data; + int cz = tagcz.value.toInt().data; + + // Exclude chunks out of range + if (opt.CH_X_GE != null && cx < opt.CH_X_GE) { + continue; + } + if (opt.CH_X_LE != null && cx > opt.CH_X_LE) { + continue; + } + if (opt.CH_Z_GE != null && cz < opt.CH_Z_GE) { + continue; + } + if (opt.CH_Z_LE != null && cz > opt.CH_Z_LE) { + continue; + } + + // Verify that chunk contains all of the INCLUDE options + bool fail = false; + foreach (int v in opt.OPT_CH_INCLUDE) { + if (!BlockScan(level, v)) { + fail = true; + break; + } + } + + // Verify that chunk does not contain any EXCLUDE options + foreach (int v in opt.OPT_CH_EXCLUDE) { + if (BlockScan(level, v)) { + fail = true; + break; + } + } + + if (fail) { + continue; + } + + if (opt.OPT_V) { + Console.WriteLine("Processing Chunk ({0}, {1})", cx, cz); + } + + filter.ApplyChunk(tree); + + Stream zipStream = RegionFileCache.getChunkDataOutputStream(opt.OPT_WORLD, cx, cz); + tree.WriteTo(zipStream); + zipStream.Close(); + } + } + } + } + + protected bool BlockScan (NBT_Tag level, int val) + { + NBT_Tag blocks = level.findTagByName("Blocks"); + if (blocks == null || blocks.type != NBT_Type.TAG_BYTE_ARRAY) { + return false; + } + + NBT_ByteArray blocks_ary = blocks.value.toByteArray(); + + for (int x = 0; x < 16; x++) { + for (int z = 0; z < 16; z++) { + for (int y = 0; y < 128; y++) { + int index = BlockIndex(x, y, z); + if (blocks_ary.data[index] == val) { + return true; + } + } + } + } + + return false; + } + + int BlockIndex (int x, int y, int z) + { + return y + (z * 128 + x * 128 * 16); + } + } +} diff --git a/NBToolkit/NBToolkit/NBT.cs b/NBToolkit/NBToolkit/NBT.cs new file mode 100644 index 0000000..7a20093 --- /dev/null +++ b/NBToolkit/NBToolkit/NBT.cs @@ -0,0 +1,736 @@ +using System; +using System.Collections.Generic; +//using System.Linq; +using System.Text; +using System.IO; +using System.IO.Compression; + +namespace NBT +{ + public enum NBT_Type + { + TAG_END = 0, + TAG_BYTE = 1, // 8 bits signed + TAG_SHORT = 2, // 16 bits signed + TAG_INT = 3, // 32 buts signed + TAG_LONG = 4, // 64 bits signed + TAG_FLOAT = 5, + TAG_DOUBLE = 6, + TAG_BYTE_ARRAY = 7, + TAG_STRING = 8, + TAG_LIST = 9, + TAG_COMPOUND = 10 + } + + public class NBT_Value + { + virtual public NBT_Byte toByte () { return null; } + virtual public NBT_Short toShort () { return null; } + virtual public NBT_Int toInt () { return null; } + virtual public NBT_Long toLong () { return null; } + virtual public NBT_Float toFloat () { return null; } + virtual public NBT_Double toDouble () { return null; } + virtual public NBT_ByteArray toByteArray () { return null; } + virtual public NBT_String toString () { return null; } + virtual public NBT_List toList () { return null; } + 
virtual public NBT_Compound toCompound () { return null; } + + virtual public NBT_Type getType () { return NBT_Type.TAG_END; } + } + + public class NBT_Byte : NBT_Value + { + public byte data = 0; + override public NBT_Byte toByte () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_BYTE; } + } + + public class NBT_Short : NBT_Value + { + public short data = 0; + override public NBT_Short toShort () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_SHORT; } + } + + public class NBT_Int : NBT_Value + { + public int data = 0; + override public NBT_Int toInt () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_INT; } + } + + public class NBT_Long : NBT_Value + { + public long data = 0; + override public NBT_Long toLong () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_LONG; } + } + + public class NBT_Float : NBT_Value + { + public float data = 0; + override public NBT_Float toFloat () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_FLOAT; } + } + + public class NBT_Double : NBT_Value + { + public double data = 0; + override public NBT_Double toDouble () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_DOUBLE; } + } + + public class NBT_ByteArray : NBT_Value + { + public int length = 0; + public byte[] data = null; + + override public NBT_ByteArray toByteArray () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_BYTE_ARRAY; } + } + + public class NBT_String : NBT_Value + { + public String data = null; + override public NBT_String toString () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_STRING; } + } + + public class NBT_List : NBT_Value + { + public int length = 0; + public NBT_Type type = NBT_Type.TAG_END; + + public List items = null; + + override public NBT_List toList () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_LIST; } + } + + public class NBT_Compound : NBT_Value + { + public int length = 0; + + public List tags = null; + + override public NBT_Compound toCompound () { return this; } + override public NBT_Type getType () { return NBT_Type.TAG_COMPOUND; } + } + + public class NBT_Tag + { + public NBT_Type type; + public NBT_String name; + + public NBT_Value value; + + public NBT_Tag findTagByName (String name) + { + if (type != NBT_Type.TAG_COMPOUND) { + return null; + } + + foreach (NBT_Tag tag in value.toCompound().tags) { + if (tag.name.data.Equals(name)) { + return tag; + } + } + + return null; + } + } + + public class NBT_Tree + { + private Stream stream = null; + + //String path = null; + + NBT_Tag root = null; + + public NBT_Tree (Stream s) + { + ReadFrom(s); + } + + public void ReadFrom (Stream s) + { + if (s != null) { + stream = s; + root = ReadTag(); + stream = null; + } + } + + public void WriteTo (Stream s) + { + if (s != null) { + stream = s; + + if (root != null) { + WriteTag(root); + } + + stream = null; + } + } + + /*public void activate () + { + if (root == null) { + read(); + } + } + + public void deactivate () + { + if (root != null) { + root = null; + } + } + + public void save () + { + if (root != null) { + write(); + } + }*/ + + public NBT_Tag getRoot () + { + return root; + } + + /*public void addListItem (NBT_Tag tag, NBT_Value val) + { + if (tag.type != NBT_Type.TAG_LIST) { + throw new Exception(); + } + + if (tag.value.toList().type != val.getType()) { + throw new Exception(); + } + + 
val.toList().length++; + val.toList().items.Add(val); + } + + public void addTag (NBT_Tag tag, NBT_Tag sub) + { + if (tag.type != NBT_Type.TAG_COMPOUND) { + throw new Exception(); + } + + tag.value.toCompound().length++; + tag.value.toCompound().tags.Add(sub); + }*/ + + /*private void read () + { + FileStream fStream = new FileStream(path, System.IO.FileMode.Open); + gzStream = new GZipStream(fStream, CompressionMode.Decompress); + + root = readTag(); + + gzStream.Close(); + + gzStream = null; + }*/ + + private NBT_Value ReadValue (NBT_Type type) + { + switch (type) { + case NBT_Type.TAG_END: + return null; + + case NBT_Type.TAG_BYTE: + return ReadByte(); + + case NBT_Type.TAG_SHORT: + return ReadShort(); + + case NBT_Type.TAG_INT: + return ReadInt(); + + case NBT_Type.TAG_LONG: + return ReadLong(); + + case NBT_Type.TAG_FLOAT: + return ReadFloat(); + + case NBT_Type.TAG_DOUBLE: + return ReadDouble(); + + case NBT_Type.TAG_BYTE_ARRAY: + return ReadByteArray(); + + case NBT_Type.TAG_STRING: + return ReadString(); + + case NBT_Type.TAG_LIST: + return ReadList(); + + case NBT_Type.TAG_COMPOUND: + return ReadCompound(); + } + + throw new Exception(); + } + + private NBT_Value ReadByte () + { + //Console.Write("NBT_File.readByte()"); + + int gzByte = stream.ReadByte(); + if (gzByte == -1) { + throw new NBTException(NBTException.MSG_GZIP_ENDOFSTREAM); + } + + NBT_Byte val = new NBT_Byte(); + val.data = (byte)gzByte; + + //Console.WriteLine(" [" + val.data + "]"); + + return val; + } + + private NBT_Value ReadShort () + { + //Console.Write("NBT_File.readShort"); + + byte[] gzBytes = new byte[2]; + stream.Read(gzBytes, 0, 2); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(gzBytes); + } + + NBT_Short val = new NBT_Short(); + val.data = BitConverter.ToInt16(gzBytes, 0); + + //Console.WriteLine(" [" + val.data + "]"); + + return val; + } + + private NBT_Value ReadInt () + { + //Console.Write("NBT_File.readInt"); + + byte[] gzBytes = new byte[4]; + stream.Read(gzBytes, 0, 4); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(gzBytes); + } + + NBT_Int val = new NBT_Int(); + val.data = BitConverter.ToInt32(gzBytes, 0); + + //Console.WriteLine(" [" + val.data + "]"); + + return val; + } + + private NBT_Value ReadLong () + { + //Console.Write("NBT_File.readLong"); + + byte[] gzBytes = new byte[8]; + stream.Read(gzBytes, 0, 8); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(gzBytes); + } + + NBT_Long val = new NBT_Long(); + val.data = BitConverter.ToInt64(gzBytes, 0); + + //Console.WriteLine(" [" + val.data + "]"); + + return val; + } + + private NBT_Value ReadFloat () + { + //Console.Write("NBT_File.readFloat()"); + + byte[] gzBytes = new byte[4]; + stream.Read(gzBytes, 0, 4); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(gzBytes); + } + + NBT_Float val = new NBT_Float(); + val.data = BitConverter.ToSingle(gzBytes, 0); + + //Console.WriteLine(" [" + val.data + "]"); + + return val; + } + + private NBT_Value ReadDouble () + { + //Console.Write("NBT_File.readDouble()"); + + byte[] gzBytes = new byte[8]; + stream.Read(gzBytes, 0, 8); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(gzBytes); + } + + NBT_Double val = new NBT_Double(); + val.data = BitConverter.ToDouble(gzBytes, 0); + + //Console.WriteLine(" [" + val.data + "]"); + + return val; + } + + private NBT_Value ReadByteArray () + { + //Console.Write("NBT_File.readByteArray()"); + + byte[] lenBytes = new byte[4]; + stream.Read(lenBytes, 0, 4); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(lenBytes); + } + + 
NBT_ByteArray val = new NBT_ByteArray(); + val.length = BitConverter.ToInt32(lenBytes, 0); + + if (val.length < 0) { + throw new NBTException(NBTException.MSG_READ_NEG); + } + + //Console.WriteLine(" [" + val.length + "]"); + + val.data = new byte[val.length]; + stream.Read(val.data, 0, val.length); + + return val; + } + + private NBT_Value ReadString () + { + //Console.Write("NBT_File.readString()"); + + byte[] lenBytes = new byte[2]; + stream.Read(lenBytes, 0, 2); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(lenBytes); + } + + short len = BitConverter.ToInt16(lenBytes, 0); + if (len < 0) { + throw new NBTException(NBTException.MSG_READ_NEG); + } + + byte[] strBytes = new byte[len]; + stream.Read(strBytes, 0, len); + + System.Text.Encoding str = Encoding.GetEncoding(28591); + + NBT_String val = new NBT_String(); + val.data = str.GetString(strBytes); + + //Console.WriteLine(" [" + val.data.ToString() + "]"); + + return val; + } + + private NBT_Value ReadList () + { + //Console.Write("NBT_File.readList()"); + + int gzByte = stream.ReadByte(); + if (gzByte == -1) { + throw new NBTException(NBTException.MSG_GZIP_ENDOFSTREAM); + } + + NBT_List val = new NBT_List(); + val.type = (NBT_Type)gzByte; + if (val.type > (NBT_Type)Enum.GetValues(typeof(NBT_Type)).GetUpperBound(0)) { + throw new NBTException(NBTException.MSG_READ_TYPE); + } + + byte[] lenBytes = new byte[4]; + stream.Read(lenBytes, 0, 4); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(lenBytes); + } + + val.length = BitConverter.ToInt32(lenBytes, 0); + if (val.length < 0) { + throw new NBTException(NBTException.MSG_READ_NEG); + } + + //Console.WriteLine(" [" + val.type + ", " + val.length + "]"); + + val.items = new List(); + + for (int i = 0; i < val.length; i++) { + val.items.Add(ReadValue(val.type)); + } + + return val; + } + + private NBT_Value ReadCompound () + { + //Console.WriteLine("NBT_File.readCompound()"); + + NBT_Compound val = new NBT_Compound(); + val.tags = new List(); + + while (true) { + NBT_Tag tag = ReadTag(); + if (tag.type == NBT_Type.TAG_END) { + break; + } + + val.length++; + val.tags.Add(tag); + } + + return val; + } + + private NBT_Tag ReadTag () + { + //Console.Write("NBT_File.readTag()"); + + NBT_Tag tag = new NBT_Tag(); + + tag.type = (NBT_Type)stream.ReadByte(); + tag.name = null; + tag.value = null; + + //Console.WriteLine(" [" + tag.type + "]"); + + if (tag.type != NBT_Type.TAG_END) { + tag.name = ReadString().toString(); + } + + tag.value = ReadValue(tag.type); + + return tag; + } + + /*private void write () + { + if (root != null) { + FileStream fStream = new FileStream(path, System.IO.FileMode.Truncate); + stream = new GZipStream(fStream, CompressionMode.Compress); + + writeTag(root); + + stream.Close(); + } + + stream = null; + }*/ + + private void WriteValue (NBT_Value val) + { + switch (val.getType()) { + case NBT_Type.TAG_END: + break; + + case NBT_Type.TAG_BYTE: + WriteByte(val.toByte()); + break; + + case NBT_Type.TAG_SHORT: + WriteShort(val.toShort()); + break; + + case NBT_Type.TAG_INT: + WriteInt(val.toInt()); + break; + + case NBT_Type.TAG_LONG: + WriteLong(val.toLong()); + break; + + case NBT_Type.TAG_FLOAT: + WriteFloat(val.toFloat()); + break; + + case NBT_Type.TAG_DOUBLE: + WriteDouble(val.toDouble()); + break; + + case NBT_Type.TAG_BYTE_ARRAY: + WriteByteArray(val.toByteArray()); + break; + + case NBT_Type.TAG_STRING: + WriteString(val.toString()); + break; + + case NBT_Type.TAG_LIST: + WriteList(val.toList()); + break; + + case NBT_Type.TAG_COMPOUND: + 
WriteCompound(val.toCompound()); + break; + } + } + + private void WriteByte (NBT_Byte val) + { + stream.WriteByte(val.data); + } + + private void WriteShort (NBT_Short val) + { + byte[] gzBytes = BitConverter.GetBytes(val.data); + + if (BitConverter.IsLittleEndian) { + //gzBytes.Reverse(); + Array.Reverse(gzBytes); + } + + stream.Write(gzBytes, 0, 2); + } + + private void WriteInt (NBT_Int val) + { + byte[] gzBytes = BitConverter.GetBytes(val.data); + + if (BitConverter.IsLittleEndian) { + //gzBytes.Reverse(); + Array.Reverse(gzBytes); + } + + stream.Write(gzBytes, 0, 4); + } + + private void WriteLong (NBT_Long val) + { + byte[] gzBytes = BitConverter.GetBytes(val.data); + + if (BitConverter.IsLittleEndian) { + //gzBytes.Reverse(); + Array.Reverse(gzBytes); + } + + stream.Write(gzBytes, 0, 8); + } + + private void WriteFloat (NBT_Float val) + { + byte[] gzBytes = BitConverter.GetBytes(val.data); + + if (BitConverter.IsLittleEndian) { + //gzBytes.Reverse(); + Array.Reverse(gzBytes); + } + + stream.Write(gzBytes, 0, 4); + } + + private void WriteDouble (NBT_Double val) + { + byte[] gzBytes = BitConverter.GetBytes(val.data); + + if (BitConverter.IsLittleEndian) { + //gzBytes.Reverse(); + Array.Reverse(gzBytes); + } + + stream.Write(gzBytes, 0, 8); + } + + private void WriteByteArray (NBT_ByteArray val) + { + byte[] lenBytes = BitConverter.GetBytes(val.length); + + if (BitConverter.IsLittleEndian) { + //lenBytes.Reverse(); + Array.Reverse(lenBytes); + } + + stream.Write(lenBytes, 0, 4); + stream.Write(val.data, 0, val.length); + } + + private void WriteString (NBT_String val) + { + byte[] lenBytes = BitConverter.GetBytes((short)val.data.Length); + + if (BitConverter.IsLittleEndian) { + //lenBytes.Reverse(); + Array.Reverse(lenBytes); + } + + stream.Write(lenBytes, 0, 2); + + System.Text.Encoding str = Encoding.GetEncoding(28591); + byte[] gzBytes = str.GetBytes(val.data); + + stream.Write(gzBytes, 0, gzBytes.Length); + } + + private void WriteList (NBT_List val) + { + byte[] lenBytes = BitConverter.GetBytes(val.length); + + if (BitConverter.IsLittleEndian) { + //lenBytes.Reverse(); + Array.Reverse(lenBytes); + } + + stream.WriteByte((byte)val.type); + stream.Write(lenBytes, 0, 4); + + foreach (NBT_Value v in val.items) { + WriteValue(v); + } + } + + private void WriteCompound (NBT_Compound val) + { + foreach (NBT_Tag t in val.tags) { + WriteTag(t); + } + + NBT_Tag e = new NBT_Tag(); + e.type = NBT_Type.TAG_END; + + WriteTag(e); + } + + private void WriteTag (NBT_Tag tag) + { + stream.WriteByte((byte)tag.type); + + if (tag.type != NBT_Type.TAG_END) { + WriteString(tag.name); + WriteValue(tag.value); + } + } + } + + class NBTException : Exception + { + public const String MSG_GZIP_ENDOFSTREAM = "Gzip Error: Unexpected end of stream"; + + public const String MSG_READ_NEG = "Read Error: Negative length"; + public const String MSG_READ_TYPE = "Read Error: Invalid value type"; + + public NBTException () { } + + public NBTException (String msg) : base(msg) { } + + public NBTException (String msg, Exception innerException) : base(msg, innerException) { } + } +} diff --git a/NBToolkit/NBToolkit/NBToolkit.csproj b/NBToolkit/NBToolkit/NBToolkit.csproj new file mode 100644 index 0000000..66dc885 --- /dev/null +++ b/NBToolkit/NBToolkit/NBToolkit.csproj @@ -0,0 +1,131 @@ + + + + Debug + AnyCPU + 9.0.30729 + 2.0 + {AFE30E14-3F2F-4461-9F7D-147AB4DCA4C3} + Exe + Properties + NBToolkit + NBToolkit + v3.5 + 512 + publish\ + true + Disk + false + Foreground + 7 + Days + false + false + true + 0 + 1.0.0.%2a + 
false + false + true + + + true + full + false + bin\Debug\ + DEBUG;TRACE + prompt + 4 + + + pdbonly + true + bin\Release\ + TRACE + prompt + 4 + + + + + 3.5 + + + 3.5 + + + 3.5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + False + .NET Framework Client Profile + false + + + False + .NET Framework 2.0 %28x86%29 + false + + + False + .NET Framework 3.0 %28x86%29 + false + + + False + .NET Framework 3.5 + false + + + False + .NET Framework 3.5 SP1 + true + + + False + Windows Installer 3.1 + true + + + + + \ No newline at end of file diff --git a/NBToolkit/NBToolkit/NBToolkit.csproj.user b/NBToolkit/NBToolkit/NBToolkit.csproj.user new file mode 100644 index 0000000..83e6969 --- /dev/null +++ b/NBToolkit/NBToolkit/NBToolkit.csproj.user @@ -0,0 +1,24 @@ + + + replace -b 18 -a 19 -w "F:\Minecraft\tps - Copy (10)" -v -cxa -1 -cxb -1 -cza 0 -czb 0 + + + replace -b 18 -a 19 -w "F:\Minecraft\tps - Copy (10)" -v -cxa -1 -cxb -1 -cza 0 -czb 0 + + + + + + + + + + + + + + + en-US + false + + \ No newline at end of file diff --git a/NBToolkit/NBToolkit/NBToolkit.sln b/NBToolkit/NBToolkit/NBToolkit.sln new file mode 100644 index 0000000..62d340a --- /dev/null +++ b/NBToolkit/NBToolkit/NBToolkit.sln @@ -0,0 +1,20 @@ + +Microsoft Visual Studio Solution File, Format Version 10.00 +# Visual C# Express 2008 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NBToolkit", "NBToolkit.csproj", "{AFE30E14-3F2F-4461-9F7D-147AB4DCA4C3}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {AFE30E14-3F2F-4461-9F7D-147AB4DCA4C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {AFE30E14-3F2F-4461-9F7D-147AB4DCA4C3}.Debug|Any CPU.Build.0 = Debug|Any CPU + {AFE30E14-3F2F-4461-9F7D-147AB4DCA4C3}.Release|Any CPU.ActiveCfg = Release|Any CPU + {AFE30E14-3F2F-4461-9F7D-147AB4DCA4C3}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/NBToolkit/NBToolkit/NBToolkit.suo b/NBToolkit/NBToolkit/NBToolkit.suo new file mode 100644 index 0000000000000000000000000000000000000000..1bff7a03093311ed174eb1583056c82cbc009028 GIT binary patch literal 38912 zcmeHQYiwM_6~0a&K+B_n5MC)b!9WP{;&+}TvDa(oX`9$dTv!SlXZ?uRYh!O7K+@8R zwh@g|)FM<-`V=ZDjapIV2jWku+6oC;9+g`4rK+^5T2*~XRm-p9e&5WUy^o!}_uALH zcb#Oc<9qMy?42`b&N*}D%$eD@-&*|pufKHTyM`-mG8P)|PhVy%vgKFdo*>eP3}XRq z2=7l%Ps>ch!0m-Jk0|gz;Cc}{ZwcUH0Q>tAz@>o803QT=0I(Qh_%*@HEZb+Kj45N(*lCR5nltv`e;PQc z{3)Lo)))(_$oG_;p!HLSQofk>qP{-S2N`2Ou1AetQU77!Gk`n4v;y^078DKq%JXyT zY|1>7;%#+hJE#NHGj#y_m3n|O$b8B_uha*<044v_A*6*}{;40RCq4q;cc=>}|M#2E z%-e%|CI5U@^3Qw9vC;vQ|622VO8!|!$v^Lv{PSMPKkt?N^Ipk6@7?mxJi881^1lQ1 z?gZ=tbOE{nJ%D=wy8--OAFlm?z2@@+=Jh`Fnl!H;HLnlhnlkSPa6Mq&58^sx-jCpV z(7ey!I%eLF<2qs9PvS~GB@TyhJ#5}nk|@p0e+<`S<~<(sY10?BJoAI$mpjj8$o=-tXrO-2J^i&c( zF@onbN=+hn5V|8N^yeYuj^e%xaJTV+8NZ5G;0LF;GPV(sq`Z9V|58%M893S3N7wHMaUdilPf^|S0I zGQKTwOn4d)7jhs+?XahWhWP(S}5O&(0Ui@!SGno9y|t>u95q)$IA47w!W78G$9GJtbu1 z5HzTwAZK0F`Q*4J{C^Cyt`1PX5xO9O8C(;rx<=$Q0BZ5n1PtnNw+_#1z@fFG%o=>F z4WG56FIsVp<2%QpPY!@Pob`4K?QO61D%9#Mqi~bG%d(e>dFNc>wn5A;=kkp)*qUk3 znj<9VDaTMl7Lrm8YV=_45`)HU6WWs_cpFO6rnIQPxq$j!{HNwZ?c$9oe77*g!2g^F zaDEV!|JfTgWF#bxa~bfD;&F})oC%J=9^v@MkwA{avb_605%Bt}pj@Gj;r^L9K1~0p@aMtI3;{5XtWL^UZ zTEAWYhLt``f6tT`l_sFnCm`odLKoD6YkA)&q`n@~UWdC@@mVYG>hRYJh(kNn;hUT_ 
z9M#r0G&VK29LIY%ilDqHOvcp)h%al5oc#Sm8Ibr1jkIsI|t|!WlDHilSIS)LUZ z4hUu|*ww8Y7(Xj#Duq5Hogfv@Q`+WkyfMlVS>MBq3M!2+k?|9h=u&uKic{M;$0!{0 zbY2d|FqOgsqqB>2d0zT}r%E9OJ1S)Y4xDi;5Xr4OlJQD4c7@UF4+V)E6k8309B_3^ zb7-V&yQIfKjHYQ}P8BGQD0df@b2j^xcYd~b z9H$BQRy39|v2w#Gn%8f9;WY6D#p723XL^zJk*@eqBX5Fv(P@wS%*&{e~~;r@Go3H3~b9RAZCu#GoN@~Xpq8zPahmysGEkx zGMqDzuM+i8p!nrV(L*8oB!}Pu9vbI7;-fUZ5?oG%h2W5rzVk><*eBfNq7Lv=L9eCg zl`wtMbsqJJdq!M}*?*Y6h>5<4cuqWL$9=o0Ejj;!|DhB|s}I<=_zlA*rcY}wDP0e1 zYM%Rg=8UiHv>vE9TCX~%bclVSs1#h?2>DGiT;B*IH+#<`n^V_ErSjf|>65MJQJ+X(2YUgDyUo*BxlTK#syvZ( z93HBJh@iE6Rl47>iMhUQe;@Ddi6>mx&+k&d?x6cEkr)h?1kQE!JH>-hnS&5|`6~o5 zoCnS`4jQpU$H9~5>JFmB6g^)^oozd}Me8%P{Pv{lAevgB^CuMrH5s#cA@-6y>3U& zD}JgMWh7c2+ztHHq(OxxxzDza9!sTTtcW(fq-={+QuJr5lX{h6V<|dPSM%5WxSvk` zhLy174FeVkd=*|LP{& z%o#21_AIQEGFq2J?3Q z2B`DoO?gg$4iBCWTB-C?SaZL5)_52#ETrR51>>)NX(t8^Dt~jwe)ciBkH1eT$5mb1 z=F;2a&m#&)9VXo4R6cHX_VRp`be`Yll)%EXZ+C7x*LasZc$}@acuL(BmjG%;CThoM zIlr*?6!vx;qmT*4BHAfz$0y1Wg%Ol?ykh3bG{@Y`u}Oj0eRma?EX zo*k3#N%_Rvzvil25bXWVpF(ViPvZG&t5Amf8iSrcwI5}_X};I{ z{Hs9c&&~M!JFas+AEc++s#vpg0_DcWlxxI^J;%Hc5eA4RsXFh7_|OT zl>M~%;!^Ba*72`96w)x>dGD7G5s9E!n+UruJi~IXPnOW(Zx4B6|9$S67k@)k!eJlX z*}qHf{LFudzZd_tVBNznQT|B-mHf){^DIE|pI80O2;lTftp64ix(h%Y{L1t5hXGHR z1-$N3g3oD%cTG4Yvk{P%g}9kmfo4yuYNCV3!=iGflq6e^*HColcS-GqK=Hv0cjyj1BQzxN0ET +// +// Copyright (C) 2008 Novell (http://www.novell.com) +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +// + +// Compile With: +// gmcs -debug+ -r:System.Core Options.cs -o:NDesk.Options.dll +// gmcs -debug+ -d:LINQ -r:System.Core Options.cs -o:NDesk.Options.dll +// +// The LINQ version just changes the implementation of +// OptionSet.Parse(IEnumerable), and confers no semantic changes. + +// +// A Getopt::Long-inspired option parsing library for C#. +// +// NDesk.Options.OptionSet is built upon a key/value table, where the +// key is a option format string and the value is a delegate that is +// invoked when the format string is matched. +// +// Option format strings: +// Regex-like BNF Grammar: +// name: .+ +// type: [=:] +// sep: ( [^{}]+ | '{' .+ '}' )? +// aliases: ( name type sep ) ( '|' name type sep )* +// +// Each '|'-delimited name is an alias for the associated action. If the +// format string ends in a '=', it has a required value. If the format +// string ends in a ':', it has an optional value. If neither '=' or ':' +// is present, no value is supported. `=' or `:' need only be defined on one +// alias, but if they are provided on more than one they must be consistent. 
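+//
+// As a quick, hedged illustration of the three prototype forms described
+// above (the option names and variables here are invented for this note,
+// not taken from the examples further below):
+//   bool verbose = false; string output = null, depth = null;
+//   var p = new OptionSet ()
+//       .Add ("v|verbose", v => verbose = v != null)  // no '=' or ':' -- flag, no value
+//       .Add ("o|output=",  v => output = v)          // '=' -- a value is required
+//       .Add ("d|depth:",   v => depth = v);          // ':' -- value optional (may be null)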
+// +// Each alias portion may also end with a "key/value separator", which is used +// to split option values if the option accepts > 1 value. If not specified, +// it defaults to '=' and ':'. If specified, it can be any character except +// '{' and '}' OR the *string* between '{' and '}'. If no separator should be +// used (i.e. the separate values should be distinct arguments), then "{}" +// should be used as the separator. +// +// Options are extracted either from the current option by looking for +// the option name followed by an '=' or ':', or is taken from the +// following option IFF: +// - The current option does not contain a '=' or a ':' +// - The current option requires a value (i.e. not a Option type of ':') +// +// The `name' used in the option format string does NOT include any leading +// option indicator, such as '-', '--', or '/'. All three of these are +// permitted/required on any named option. +// +// Option bundling is permitted so long as: +// - '-' is used to start the option group +// - all of the bundled options are a single character +// - at most one of the bundled options accepts a value, and the value +// provided starts from the next character to the end of the string. +// +// This allows specifying '-a -b -c' as '-abc', and specifying '-D name=value' +// as '-Dname=value'. +// +// Option processing is disabled by specifying "--". All options after "--" +// are returned by OptionSet.Parse() unchanged and unprocessed. +// +// Unprocessed options are returned from OptionSet.Parse(). +// +// Examples: +// int verbose = 0; +// OptionSet p = new OptionSet () +// .Add ("v", v => ++verbose) +// .Add ("name=|value=", v => Console.WriteLine (v)); +// p.Parse (new string[]{"-v", "--v", "/v", "-name=A", "/name", "B", "extra"}); +// +// The above would parse the argument string array, and would invoke the +// lambda expression three times, setting `verbose' to 3 when complete. +// It would also print out "A" and "B" to standard output. +// The returned array would contain the string "extra". 
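+//
+// A further hedged sketch of the bundling behaviour described above (the
+// "D=" option and the variable name are invented for this note):
+//   var q = new OptionSet ()
+//       .Add ("D=", (k, v) => Console.WriteLine (k + "=" + v)); // two-value option
+//   q.Parse (new string[]{"-Dname=value"}); // same as "-D name=value"; prints "name=value"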
+// +// C# 3.0 collection initializers are supported and encouraged: +// var p = new OptionSet () { +// { "h|?|help", v => ShowHelp () }, +// }; +// +// System.ComponentModel.TypeConverter is also supported, allowing the use of +// custom data types in the callback type; TypeConverter.ConvertFromString() +// is used to convert the value option to an instance of the specified +// type: +// +// var p = new OptionSet () { +// { "foo=", (Foo f) => Console.WriteLine (f.ToString ()) }, +// }; +// +// Random other tidbits: +// - Boolean options (those w/o '=' or ':' in the option format string) +// are explicitly enabled if they are followed with '+', and explicitly +// disabled if they are followed with '-': +// string a = null; +// var p = new OptionSet () { +// { "a", s => a = s }, +// }; +// p.Parse (new string[]{"-a"}); // sets v != null +// p.Parse (new string[]{"-a+"}); // sets v != null +// p.Parse (new string[]{"-a-"}); // sets v == null +// + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.ComponentModel; +using System.Globalization; +using System.IO; +using System.Runtime.Serialization; +using System.Security.Permissions; +using System.Text; +using System.Text.RegularExpressions; + +#if LINQ +using System.Linq; +#endif + +#if TEST +using NDesk.Options; +#endif + +namespace NDesk.Options { + + public class OptionValueCollection : IList, IList { + + List values = new List (); + OptionContext c; + + internal OptionValueCollection (OptionContext c) + { + this.c = c; + } + + #region ICollection + void ICollection.CopyTo (Array array, int index) {(values as ICollection).CopyTo (array, index);} + bool ICollection.IsSynchronized {get {return (values as ICollection).IsSynchronized;}} + object ICollection.SyncRoot {get {return (values as ICollection).SyncRoot;}} + #endregion + + #region ICollection + public void Add (string item) {values.Add (item);} + public void Clear () {values.Clear ();} + public bool Contains (string item) {return values.Contains (item);} + public void CopyTo (string[] array, int arrayIndex) {values.CopyTo (array, arrayIndex);} + public bool Remove (string item) {return values.Remove (item);} + public int Count {get {return values.Count;}} + public bool IsReadOnly {get {return false;}} + #endregion + + #region IEnumerable + IEnumerator IEnumerable.GetEnumerator () {return values.GetEnumerator ();} + #endregion + + #region IEnumerable + public IEnumerator GetEnumerator () {return values.GetEnumerator ();} + #endregion + + #region IList + int IList.Add (object value) {return (values as IList).Add (value);} + bool IList.Contains (object value) {return (values as IList).Contains (value);} + int IList.IndexOf (object value) {return (values as IList).IndexOf (value);} + void IList.Insert (int index, object value) {(values as IList).Insert (index, value);} + void IList.Remove (object value) {(values as IList).Remove (value);} + void IList.RemoveAt (int index) {(values as IList).RemoveAt (index);} + bool IList.IsFixedSize {get {return false;}} + object IList.this [int index] {get {return this [index];} set {(values as IList)[index] = value;}} + #endregion + + #region IList + public int IndexOf (string item) {return values.IndexOf (item);} + public void Insert (int index, string item) {values.Insert (index, item);} + public void RemoveAt (int index) {values.RemoveAt (index);} + + private void AssertValid (int index) + { + if (c.Option == null) + throw new InvalidOperationException 
("OptionContext.Option is null."); + if (index >= c.Option.MaxValueCount) + throw new ArgumentOutOfRangeException ("index"); + if (c.Option.OptionValueType == OptionValueType.Required && + index >= values.Count) + throw new OptionException (string.Format ( + c.OptionSet.MessageLocalizer ("Missing required value for option '{0}'."), c.OptionName), + c.OptionName); + } + + public string this [int index] { + get { + AssertValid (index); + return index >= values.Count ? null : values [index]; + } + set { + values [index] = value; + } + } + #endregion + + public List ToList () + { + return new List (values); + } + + public string[] ToArray () + { + return values.ToArray (); + } + + public override string ToString () + { + return string.Join (", ", values.ToArray ()); + } + } + + public class OptionContext { + private Option option; + private string name; + private int index; + private OptionSet set; + private OptionValueCollection c; + + public OptionContext (OptionSet set) + { + this.set = set; + this.c = new OptionValueCollection (this); + } + + public Option Option { + get {return option;} + set {option = value;} + } + + public string OptionName { + get {return name;} + set {name = value;} + } + + public int OptionIndex { + get {return index;} + set {index = value;} + } + + public OptionSet OptionSet { + get {return set;} + } + + public OptionValueCollection OptionValues { + get {return c;} + } + } + + public enum OptionValueType { + None, + Optional, + Required, + } + + public abstract class Option { + string prototype, description; + string[] names; + OptionValueType type; + int count; + string[] separators; + + protected Option (string prototype, string description) + : this (prototype, description, 1) + { + } + + protected Option (string prototype, string description, int maxValueCount) + { + if (prototype == null) + throw new ArgumentNullException ("prototype"); + if (prototype.Length == 0) + throw new ArgumentException ("Cannot be the empty string.", "prototype"); + if (maxValueCount < 0) + throw new ArgumentOutOfRangeException ("maxValueCount"); + + this.prototype = prototype; + this.names = prototype.Split ('|'); + this.description = description; + this.count = maxValueCount; + this.type = ParsePrototype (); + + if (this.count == 0 && type != OptionValueType.None) + throw new ArgumentException ( + "Cannot provide maxValueCount of 0 for OptionValueType.Required or " + + "OptionValueType.Optional.", + "maxValueCount"); + if (this.type == OptionValueType.None && maxValueCount > 1) + throw new ArgumentException ( + string.Format ("Cannot provide maxValueCount of {0} for OptionValueType.None.", maxValueCount), + "maxValueCount"); + if (Array.IndexOf (names, "<>") >= 0 && + ((names.Length == 1 && this.type != OptionValueType.None) || + (names.Length > 1 && this.MaxValueCount > 1))) + throw new ArgumentException ( + "The default option handler '<>' cannot require values.", + "prototype"); + } + + public string Prototype {get {return prototype;}} + public string Description {get {return description;}} + public OptionValueType OptionValueType {get {return type;}} + public int MaxValueCount {get {return count;}} + + public string[] GetNames () + { + return (string[]) names.Clone (); + } + + public string[] GetValueSeparators () + { + if (separators == null) + return new string [0]; + return (string[]) separators.Clone (); + } + + protected static T Parse (string value, OptionContext c) + { + TypeConverter conv = TypeDescriptor.GetConverter (typeof (T)); + T t = default (T); + try { + if (value 
!= null) + t = (T) conv.ConvertFromString (value); + } + catch (Exception e) { + throw new OptionException ( + string.Format ( + c.OptionSet.MessageLocalizer ("Could not convert string `{0}' to type {1} for option `{2}'."), + value, typeof (T).Name, c.OptionName), + c.OptionName, e); + } + return t; + } + + internal string[] Names {get {return names;}} + internal string[] ValueSeparators {get {return separators;}} + + static readonly char[] NameTerminator = new char[]{'=', ':'}; + + private OptionValueType ParsePrototype () + { + char type = '\0'; + List seps = new List (); + for (int i = 0; i < names.Length; ++i) { + string name = names [i]; + if (name.Length == 0) + throw new ArgumentException ("Empty option names are not supported.", "prototype"); + + int end = name.IndexOfAny (NameTerminator); + if (end == -1) + continue; + names [i] = name.Substring (0, end); + if (type == '\0' || type == name [end]) + type = name [end]; + else + throw new ArgumentException ( + string.Format ("Conflicting option types: '{0}' vs. '{1}'.", type, name [end]), + "prototype"); + AddSeparators (name, end, seps); + } + + if (type == '\0') + return OptionValueType.None; + + if (count <= 1 && seps.Count != 0) + throw new ArgumentException ( + string.Format ("Cannot provide key/value separators for Options taking {0} value(s).", count), + "prototype"); + if (count > 1) { + if (seps.Count == 0) + this.separators = new string[]{":", "="}; + else if (seps.Count == 1 && seps [0].Length == 0) + this.separators = null; + else + this.separators = seps.ToArray (); + } + + return type == '=' ? OptionValueType.Required : OptionValueType.Optional; + } + + private static void AddSeparators (string name, int end, ICollection seps) + { + int start = -1; + for (int i = end+1; i < name.Length; ++i) { + switch (name [i]) { + case '{': + if (start != -1) + throw new ArgumentException ( + string.Format ("Ill-formed name/value separator found in \"{0}\".", name), + "prototype"); + start = i+1; + break; + case '}': + if (start == -1) + throw new ArgumentException ( + string.Format ("Ill-formed name/value separator found in \"{0}\".", name), + "prototype"); + seps.Add (name.Substring (start, i-start)); + start = -1; + break; + default: + if (start == -1) + seps.Add (name [i].ToString ()); + break; + } + } + if (start != -1) + throw new ArgumentException ( + string.Format ("Ill-formed name/value separator found in \"{0}\".", name), + "prototype"); + } + + public void Invoke (OptionContext c) + { + OnParseComplete (c); + c.OptionName = null; + c.Option = null; + c.OptionValues.Clear (); + } + + protected abstract void OnParseComplete (OptionContext c); + + public override string ToString () + { + return Prototype; + } + } + + [Serializable] + public class OptionException : Exception { + private string option; + + public OptionException () + { + } + + public OptionException (string message, string optionName) + : base (message) + { + this.option = optionName; + } + + public OptionException (string message, string optionName, Exception innerException) + : base (message, innerException) + { + this.option = optionName; + } + + protected OptionException (SerializationInfo info, StreamingContext context) + : base (info, context) + { + this.option = info.GetString ("OptionName"); + } + + public string OptionName { + get {return this.option;} + } + + [SecurityPermission (SecurityAction.LinkDemand, SerializationFormatter = true)] + public override void GetObjectData (SerializationInfo info, StreamingContext context) + { + base.GetObjectData 
(info, context); + info.AddValue ("OptionName", option); + } + } + + public delegate void OptionAction (TKey key, TValue value); + + public class OptionSet : KeyedCollection + { + public OptionSet () + : this (delegate (string f) {return f;}) + { + } + + public OptionSet (Converter localizer) + { + this.localizer = localizer; + } + + Converter localizer; + + public Converter MessageLocalizer { + get {return localizer;} + } + + protected override string GetKeyForItem (Option item) + { + if (item == null) + throw new ArgumentNullException ("option"); + if (item.Names != null && item.Names.Length > 0) + return item.Names [0]; + // This should never happen, as it's invalid for Option to be + // constructed w/o any names. + throw new InvalidOperationException ("Option has no names!"); + } + + [Obsolete ("Use KeyedCollection.this[string]")] + protected Option GetOptionForName (string option) + { + if (option == null) + throw new ArgumentNullException ("option"); + try { + return base [option]; + } + catch (KeyNotFoundException) { + return null; + } + } + + protected override void InsertItem (int index, Option item) + { + base.InsertItem (index, item); + AddImpl (item); + } + + protected override void RemoveItem (int index) + { + base.RemoveItem (index); + Option p = Items [index]; + // KeyedCollection.RemoveItem() handles the 0th item + for (int i = 1; i < p.Names.Length; ++i) { + Dictionary.Remove (p.Names [i]); + } + } + + protected override void SetItem (int index, Option item) + { + base.SetItem (index, item); + RemoveItem (index); + AddImpl (item); + } + + private void AddImpl (Option option) + { + if (option == null) + throw new ArgumentNullException ("option"); + List added = new List (option.Names.Length); + try { + // KeyedCollection.InsertItem/SetItem handle the 0th name. 
+ for (int i = 1; i < option.Names.Length; ++i) { + Dictionary.Add (option.Names [i], option); + added.Add (option.Names [i]); + } + } + catch (Exception) { + foreach (string name in added) + Dictionary.Remove (name); + throw; + } + } + + public new OptionSet Add (Option option) + { + base.Add (option); + return this; + } + + sealed class ActionOption : Option { + Action action; + + public ActionOption (string prototype, string description, int count, Action action) + : base (prototype, description, count) + { + if (action == null) + throw new ArgumentNullException ("action"); + this.action = action; + } + + protected override void OnParseComplete (OptionContext c) + { + action (c.OptionValues); + } + } + + public OptionSet Add (string prototype, Action action) + { + return Add (prototype, null, action); + } + + public OptionSet Add (string prototype, string description, Action action) + { + if (action == null) + throw new ArgumentNullException ("action"); + Option p = new ActionOption (prototype, description, 1, + delegate (OptionValueCollection v) { action (v [0]); }); + base.Add (p); + return this; + } + + public OptionSet Add (string prototype, OptionAction action) + { + return Add (prototype, null, action); + } + + public OptionSet Add (string prototype, string description, OptionAction action) + { + if (action == null) + throw new ArgumentNullException ("action"); + Option p = new ActionOption (prototype, description, 2, + delegate (OptionValueCollection v) {action (v [0], v [1]);}); + base.Add (p); + return this; + } + + sealed class ActionOption : Option { + Action action; + + public ActionOption (string prototype, string description, Action action) + : base (prototype, description, 1) + { + if (action == null) + throw new ArgumentNullException ("action"); + this.action = action; + } + + protected override void OnParseComplete (OptionContext c) + { + action (Parse (c.OptionValues [0], c)); + } + } + + sealed class ActionOption : Option { + OptionAction action; + + public ActionOption (string prototype, string description, OptionAction action) + : base (prototype, description, 2) + { + if (action == null) + throw new ArgumentNullException ("action"); + this.action = action; + } + + protected override void OnParseComplete (OptionContext c) + { + action ( + Parse (c.OptionValues [0], c), + Parse (c.OptionValues [1], c)); + } + } + + public OptionSet Add (string prototype, Action action) + { + return Add (prototype, null, action); + } + + public OptionSet Add (string prototype, string description, Action action) + { + return Add (new ActionOption (prototype, description, action)); + } + + public OptionSet Add (string prototype, OptionAction action) + { + return Add (prototype, null, action); + } + + public OptionSet Add (string prototype, string description, OptionAction action) + { + return Add (new ActionOption (prototype, description, action)); + } + + protected virtual OptionContext CreateOptionContext () + { + return new OptionContext (this); + } + +#if LINQ + public List Parse (IEnumerable arguments) + { + bool process = true; + OptionContext c = CreateOptionContext (); + c.OptionIndex = -1; + var def = GetOptionForName ("<>"); + var unprocessed = + from argument in arguments + where ++c.OptionIndex >= 0 && (process || def != null) + ? process + ? argument == "--" + ? (process = false) + : !Parse (argument, c) + ? def != null + ? Unprocessed (null, def, c, argument) + : true + : false + : def != null + ? 
Unprocessed (null, def, c, argument) + : true + : true + select argument; + List r = unprocessed.ToList (); + if (c.Option != null) + c.Option.Invoke (c); + return r; + } +#else + public List Parse (IEnumerable arguments) + { + OptionContext c = CreateOptionContext (); + c.OptionIndex = -1; + bool process = true; + List unprocessed = new List (); + Option def = Contains ("<>") ? this ["<>"] : null; + foreach (string argument in arguments) { + ++c.OptionIndex; + if (argument == "--") { + process = false; + continue; + } + if (!process) { + Unprocessed (unprocessed, def, c, argument); + continue; + } + if (!Parse (argument, c)) + Unprocessed (unprocessed, def, c, argument); + } + if (c.Option != null) + c.Option.Invoke (c); + return unprocessed; + } +#endif + + private static bool Unprocessed (ICollection extra, Option def, OptionContext c, string argument) + { + if (def == null) { + extra.Add (argument); + return false; + } + c.OptionValues.Add (argument); + c.Option = def; + c.Option.Invoke (c); + return false; + } + + private readonly Regex ValueOption = new Regex ( + @"^(?--|-|/)(?[^:=]+)((?[:=])(?.*))?$"); + + protected bool GetOptionParts (string argument, out string flag, out string name, out string sep, out string value) + { + if (argument == null) + throw new ArgumentNullException ("argument"); + + flag = name = sep = value = null; + Match m = ValueOption.Match (argument); + if (!m.Success) { + return false; + } + flag = m.Groups ["flag"].Value; + name = m.Groups ["name"].Value; + if (m.Groups ["sep"].Success && m.Groups ["value"].Success) { + sep = m.Groups ["sep"].Value; + value = m.Groups ["value"].Value; + } + return true; + } + + protected virtual bool Parse (string argument, OptionContext c) + { + if (c.Option != null) { + ParseValue (argument, c); + return true; + } + + string f, n, s, v; + if (!GetOptionParts (argument, out f, out n, out s, out v)) + return false; + + Option p; + if (Contains (n)) { + p = this [n]; + c.OptionName = f + n; + c.Option = p; + switch (p.OptionValueType) { + case OptionValueType.None: + c.OptionValues.Add (n); + c.Option.Invoke (c); + break; + case OptionValueType.Optional: + case OptionValueType.Required: + ParseValue (v, c); + break; + } + return true; + } + // no match; is it a bool option? + if (ParseBool (argument, n, c)) + return true; + // is it a bundled option? + if (ParseBundledValue (f, string.Concat (n + s + v), c)) + return true; + + return false; + } + + private void ParseValue (string option, OptionContext c) + { + if (option != null) + foreach (string o in c.Option.ValueSeparators != null + ? option.Split (c.Option.ValueSeparators, StringSplitOptions.None) + : new string[]{option}) { + c.OptionValues.Add (o); + } + if (c.OptionValues.Count == c.Option.MaxValueCount || + c.Option.OptionValueType == OptionValueType.Optional) + c.Option.Invoke (c); + else if (c.OptionValues.Count > c.Option.MaxValueCount) { + throw new OptionException (localizer (string.Format ( + "Error: Found {0} option values when expecting {1}.", + c.OptionValues.Count, c.Option.MaxValueCount)), + c.OptionName); + } + } + + private bool ParseBool (string option, string n, OptionContext c) + { + Option p; + string rn; + if (n.Length >= 1 && (n [n.Length-1] == '+' || n [n.Length-1] == '-') && + Contains ((rn = n.Substring (0, n.Length-1)))) { + p = this [rn]; + string v = n [n.Length-1] == '+' ? 
option : null; + c.OptionName = option; + c.Option = p; + c.OptionValues.Add (v); + p.Invoke (c); + return true; + } + return false; + } + + private bool ParseBundledValue (string f, string n, OptionContext c) + { + if (f != "-") + return false; + for (int i = 0; i < n.Length; ++i) { + Option p; + string opt = f + n [i].ToString (); + string rn = n [i].ToString (); + if (!Contains (rn)) { + if (i == 0) + return false; + throw new OptionException (string.Format (localizer ( + "Cannot bundle unregistered option '{0}'."), opt), opt); + } + p = this [rn]; + switch (p.OptionValueType) { + case OptionValueType.None: + Invoke (c, opt, n, p); + break; + case OptionValueType.Optional: + case OptionValueType.Required: { + string v = n.Substring (i+1); + c.Option = p; + c.OptionName = opt; + ParseValue (v.Length != 0 ? v : null, c); + return true; + } + default: + throw new InvalidOperationException ("Unknown OptionValueType: " + p.OptionValueType); + } + } + return true; + } + + private static void Invoke (OptionContext c, string name, string value, Option option) + { + c.OptionName = name; + c.Option = option; + c.OptionValues.Add (value); + option.Invoke (c); + } + + private const int OptionWidth = 29; + + public void WriteOptionDescriptions (TextWriter o) + { + foreach (Option p in this) { + int written = 0; + if (!WriteOptionPrototype (o, p, ref written)) + continue; + + if (written < OptionWidth) + o.Write (new string (' ', OptionWidth - written)); + else { + o.WriteLine (); + o.Write (new string (' ', OptionWidth)); + } + + List lines = GetLines (localizer (GetDescription (p.Description))); + o.WriteLine (lines [0]); + string prefix = new string (' ', OptionWidth+2); + for (int i = 1; i < lines.Count; ++i) { + o.Write (prefix); + o.WriteLine (lines [i]); + } + } + } + + bool WriteOptionPrototype (TextWriter o, Option p, ref int written) + { + string[] names = p.Names; + + int i = GetNextOptionIndex (names, 0); + if (i == names.Length) + return false; + + if (names [i].Length == 1) { + Write (o, ref written, " -"); + Write (o, ref written, names [0]); + } + else { + Write (o, ref written, " --"); + Write (o, ref written, names [0]); + } + + for ( i = GetNextOptionIndex (names, i+1); + i < names.Length; i = GetNextOptionIndex (names, i+1)) { + Write (o, ref written, ", "); + Write (o, ref written, names [i].Length == 1 ? "-" : "--"); + Write (o, ref written, names [i]); + } + + if (p.OptionValueType == OptionValueType.Optional || + p.OptionValueType == OptionValueType.Required) { + if (p.OptionValueType == OptionValueType.Optional) { + Write (o, ref written, localizer ("[")); + } + Write (o, ref written, localizer ("=" + GetArgumentName (0, p.MaxValueCount, p.Description))); + string sep = p.ValueSeparators != null && p.ValueSeparators.Length > 0 + ? p.ValueSeparators [0] + : " "; + for (int c = 1; c < p.MaxValueCount; ++c) { + Write (o, ref written, localizer (sep + GetArgumentName (c, p.MaxValueCount, p.Description))); + } + if (p.OptionValueType == OptionValueType.Optional) { + Write (o, ref written, localizer ("]")); + } + } + return true; + } + + static int GetNextOptionIndex (string[] names, int i) + { + while (i < names.Length && names [i] == "<>") { + ++i; + } + return i; + } + + static void Write (TextWriter o, ref int n, string s) + { + n += s.Length; + o.Write (s); + } + + private static string GetArgumentName (int index, int maxIndex, string description) + { + if (description == null) + return maxIndex == 1 ? 
"VALUE" : "VALUE" + (index + 1); + string[] nameStart; + if (maxIndex == 1) + nameStart = new string[]{"{0:", "{"}; + else + nameStart = new string[]{"{" + index + ":"}; + for (int i = 0; i < nameStart.Length; ++i) { + int start, j = 0; + do { + start = description.IndexOf (nameStart [i], j); + } while (start >= 0 && j != 0 ? description [j++ - 1] == '{' : false); + if (start == -1) + continue; + int end = description.IndexOf ("}", start); + if (end == -1) + continue; + return description.Substring (start + nameStart [i].Length, end - start - nameStart [i].Length); + } + return maxIndex == 1 ? "VALUE" : "VALUE" + (index + 1); + } + + private static string GetDescription (string description) + { + if (description == null) + return string.Empty; + StringBuilder sb = new StringBuilder (description.Length); + int start = -1; + for (int i = 0; i < description.Length; ++i) { + switch (description [i]) { + case '{': + if (i == start) { + sb.Append ('{'); + start = -1; + } + else if (start < 0) + start = i + 1; + break; + case '}': + if (start < 0) { + if ((i+1) == description.Length || description [i+1] != '}') + throw new InvalidOperationException ("Invalid option description: " + description); + ++i; + sb.Append ("}"); + } + else { + sb.Append (description.Substring (start, i - start)); + start = -1; + } + break; + case ':': + if (start < 0) + goto default; + start = i + 1; + break; + default: + if (start < 0) + sb.Append (description [i]); + break; + } + } + return sb.ToString (); + } + + private static List GetLines (string description) + { + List lines = new List (); + if (string.IsNullOrEmpty (description)) { + lines.Add (string.Empty); + return lines; + } + int length = 80 - OptionWidth - 2; + int start = 0, end; + do { + end = GetLineEnd (start, length, description); + bool cont = false; + if (end < description.Length) { + char c = description [end]; + if (c == '-' || (char.IsWhiteSpace (c) && c != '\n')) + ++end; + else if (c != '\n') { + cont = true; + --end; + } + } + lines.Add (description.Substring (start, end - start)); + if (cont) { + lines [lines.Count-1] += "-"; + } + start = end; + if (start < description.Length && description [start] == '\n') + ++start; + } while (end < description.Length); + return lines; + } + + private static int GetLineEnd (int start, int length, string description) + { + int end = Math.Min (start + length, description.Length); + int sep = -1; + for (int i = start; i < end; ++i) { + switch (description [i]) { + case ' ': + case '\t': + case '\v': + case '-': + case ',': + case '.': + case ';': + sep = i; + break; + case '\n': + return i; + } + } + if (sep == -1 || end == description.Length) + return end; + return sep; + } + } +} + diff --git a/NBToolkit/NBToolkit/Oregen.cs b/NBToolkit/NBToolkit/Oregen.cs new file mode 100644 index 0000000..bdbbc74 --- /dev/null +++ b/NBToolkit/NBToolkit/Oregen.cs @@ -0,0 +1,320 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using NDesk.Options; +using NBT; + +namespace NBToolkit +{ + public class OregenOptions : TKOptions + { + private OptionSet filterOpt = null; + + public int? OPT_ID = null; + + public int? OPT_DATA = null; + + public int? OPT_ROUNDS = null; + public int? OPT_SIZE = null; + public int? OPT_MIN = null; + public int? 
OPT_MAX = null; + + public bool OPT_OO = false; + public bool OPT_OA = false; + + public List<int> OPT_OB_INCLUDE = new List<int>(); + public List<int> OPT_OB_EXCLUDE = new List<int>(); + + private class OreType + { + public int id; + public string name; + public int rounds; + public int min; + public int max; + public int size; + }; + + private OreType[] oreList = new OreType[] { + new OreType() { id = 16, name = "Coal", rounds = 20, min = 0, max = 127, size = 16 }, + new OreType() { id = 15, name = "Iron", rounds = 20, min = 0, max = 63, size = 8 }, + new OreType() { id = 14, name = "Gold", rounds = 2, min = 0, max = 31, size = 8 }, + new OreType() { id = 73, name = "Redstone", rounds = 8, min = 0, max = 31, size = 7 }, + new OreType() { id = 56, name = "Diamond", rounds = 1, min = 0, max = 15, size = 7 }, + new OreType() { id = 21, name = "Lapis", rounds = 1, min = 0, max = 31, size = 7 }, + }; + + public OregenOptions (string[] args) : base(args) + { + filterOpt = new OptionSet() + { + { "b|block=", "Generate blocks of type {ID} (0-255)", + v => OPT_ID = Convert.ToByte(v) % 256 }, + { "d|data=", "Set the block's data value to {VAL} (0-15)", + v => OPT_DATA = Convert.ToInt32(v) % 16 }, + { "r|rounds=", "Generate {NUM} deposits per chunk", + v => OPT_ROUNDS = Convert.ToInt32(v) }, + { "min=", "Generates deposits no lower than depth {VAL} (0-127)", + v => OPT_MIN = Convert.ToInt32(v) % 128 }, + { "max=", "Generates deposits no higher than depth {VAL} (0-127)", + v => OPT_MAX = Convert.ToInt32(v) % 128 }, + { "s|size=", "Generates deposits containing roughly up to {VAL} blocks", + v => OPT_SIZE = Convert.ToInt32(v) % 128 }, + { "oo=", "Generated deposits can replace other existing ores", + v => OPT_OO = true }, + { "oa=", "Generated deposits can replace any existing block (incl. 
air)", + v => OPT_OA = true }, + { "oi=", "Generated deposits can replace the specified block type [repeatable]", + v => OPT_OB_INCLUDE.Add(Convert.ToInt32(v) % 256) }, + { "ox=", "Generated deposits can never replace the specified block type [repeatable]", + v => OPT_OB_EXCLUDE.Add(Convert.ToInt32(v) % 256) }, + }; + + filterOpt.Parse(args); + } + + public override void PrintUsage () + { + Console.WriteLine("Usage: nbtoolkit oregen -b -w [options]"); + Console.WriteLine(); + Console.WriteLine("Options for command 'oregen':"); + + filterOpt.WriteOptionDescriptions(Console.Out); + + Console.WriteLine(); + base.PrintUsage(); + } + + public override void SetDefaults () + { + base.SetDefaults(); + + foreach (OreType ore in oreList) { + if (OPT_ID != ore.id) { + continue; + } + + if (OPT_ROUNDS == null) { + OPT_ROUNDS = ore.rounds; + } + if (OPT_MIN == null) { + OPT_MIN = ore.min; + } + if (OPT_MAX == null) { + OPT_MAX = ore.max; + } + if (OPT_SIZE == null) { + OPT_SIZE = ore.size; + } + } + + // Check for required parameters + if (OPT_ID == null) { + Console.WriteLine("Error: You must specify a Block ID"); + Console.WriteLine(); + PrintUsage(); + + throw new TKOptionException(); + } + + if (OPT_ROUNDS == null) { + OPT_ROUNDS = 1; + } + + if (OPT_MIN == null || OPT_MAX == null || OPT_SIZE == null) { + if (OPT_MIN == null) { + Console.WriteLine("Error: You must specify the minimum depth for non-ore blocks"); + } + if (OPT_MAX == null) { + Console.WriteLine("Error: You must specify the maximum depth for non-ore blocks"); + } + if (OPT_SIZE == null) { + Console.WriteLine("Error: You must specify the deposit size for non-ore blocks"); + } + + Console.WriteLine(); + PrintUsage(); + + throw new TKOptionException(); + } + } + } + + public class Oregen : TKFilter + { + public const int BLOCK_STONE = 1; + public const int BLOCK_DIRT = 3; + public const int BLOCK_GRAVEL = 13; + public const int BLOCK_GOLD = 14; + public const int BLOCK_IRON = 15; + public const int BLOCK_COAL = 16; + public const int BLOCK_LAPIS = 21; + public const int BLOCK_DIAMOND = 56; + public const int BLOCK_REDSTONE = 73; + + private OregenOptions opt; + + private static Random rand = new Random(); + + public Oregen (OregenOptions o) + { + opt = o; + } + + public override void ApplyChunk (NBT_Tree root) + { + if (root == null || root.getRoot() == null) { + return; + } + + NBT_Tag level = root.getRoot().findTagByName("Level"); + if (level == null || level.type != NBT_Type.TAG_COMPOUND) { + return; + } + + NBT_Tag blocks = level.findTagByName("Blocks"); + if (blocks == null || blocks.type != NBT_Type.TAG_BYTE_ARRAY) { + return; + } + + NBT_Tag data = level.findTagByName("Data"); + if (data == null || data.type != NBT_Type.TAG_BYTE_ARRAY) { + return; + } + + NBT_ByteArray blocks_ary = blocks.value.toByteArray(); + NBT_ByteArray data_ary = data.value.toByteArray(); + + if (opt.OPT_V) { + Console.WriteLine("Generating {0} size {1} deposits of {2} between {3} and {4}", + opt.OPT_ROUNDS, opt.OPT_SIZE, opt.OPT_ID, opt.OPT_MIN, opt.OPT_MAX); + } + + for (int i = 0; i < opt.OPT_ROUNDS; i++) { + if (opt.OPT_VV) { + Console.WriteLine("Generating round {0}...", i); + } + + GenerateDeposit(blocks_ary, data_ary); + } + } + + protected void GenerateDeposit (NBT_ByteArray blocks, NBT_ByteArray data) + { + double x_scale = 0.25 + (rand.NextDouble() * 0.75); + double y_scale = 0.25 + (rand.NextDouble() * 0.75); + double z_scale = 0.25 + (rand.NextDouble() * 0.75); + + if (opt.OPT_VV) { + Console.WriteLine("Selected scale: {0}, {1}, {2}", x_scale, 
y_scale, z_scale); + } + + double x_len = (double)opt.OPT_SIZE / 8.0 * x_scale; + double z_len = (double)opt.OPT_SIZE / 8.0 * z_scale; + double y_len = ((double)opt.OPT_SIZE / 16.0 + 2.0) * y_scale; + + if (opt.OPT_VV) { + Console.WriteLine("Selected length: {0}, {1}, {2}", x_len, y_len, z_len); + } + + double xpos = rand.NextDouble() * (16.0 - x_len); + double zpos = rand.NextDouble() * (16.0 - z_len); + double ypos = (double)opt.OPT_MIN + (rand.NextDouble() * ((double)opt.OPT_MAX - (double)opt.OPT_MIN)) + 2.0; + + if (opt.OPT_VV) { + Console.WriteLine("Selected initial position: {0}, {1}, {2}", xpos, ypos, zpos); + } + + int sample_size = 2 * (int)opt.OPT_SIZE; + double fuzz = 0.25; + + double x_step = x_len / sample_size; + double y_step = y_len / sample_size; + double z_step = z_len / sample_size; + + for (int i = 0; i < sample_size; i++) { + int tx = (int)Math.Floor(xpos + i * x_step); + int ty = (int)Math.Floor(ypos + i * y_step); + int tz = (int)Math.Floor(zpos + i * z_step); + int txp = (int)Math.Floor(xpos + i * x_step + fuzz); + int typ = (int)Math.Floor(ypos + i * y_step + fuzz); + int tzp = (int)Math.Floor(zpos + i * z_step + fuzz); + + if (tx < 0) tx = 0; + if (ty < 0) ty = 0; + if (tz < 0) tz = 0; + + if (tx >= 16) tx = 15; + if (ty >= 128) ty = 127; + if (tz >= 16) tz = 15; + + if (txp < 0) txp = 0; + if (typ < 0) typ = 0; + if (tzp < 0) tzp = 0; + + if (txp >= 16) txp = 15; + if (typ >= 128) typ = 127; + if (tzp >= 16) tzp = 15; + + UpdateBlock(blocks, data, tx, ty, tz); + UpdateBlock(blocks, data, txp, ty, tz); + UpdateBlock(blocks, data, tx, typ, tz); + UpdateBlock(blocks, data, tx, ty, tzp); + UpdateBlock(blocks, data, txp, typ, tz); + UpdateBlock(blocks, data, tx, typ, tzp); + UpdateBlock(blocks, data, txp, ty, tzp); + UpdateBlock(blocks, data, txp, typ, tzp); + } + } + + protected void UpdateBlock (NBT_ByteArray blocks, NBT_ByteArray data, int x, int y, int z) + { + int index = BlockIndex(x, y, z); + + if (index < 0 || index >= 32768) { + throw new Exception(); + } + + if ( + ((opt.OPT_OA) && (blocks.data[index] != opt.OPT_ID)) || + ((opt.OPT_OO) && ( + blocks.data[index] == BLOCK_COAL || blocks.data[index] == BLOCK_IRON || + blocks.data[index] == BLOCK_GOLD || blocks.data[index] == BLOCK_REDSTONE || + blocks.data[index] == BLOCK_DIAMOND || blocks.data[index] == BLOCK_LAPIS || + blocks.data[index] == BLOCK_DIRT || blocks.data[index] == BLOCK_GRAVEL) && (blocks.data[index] != opt.OPT_ID)) || + (opt.OPT_OB_INCLUDE.Count > 0) || + (blocks.data[index] == BLOCK_STONE) + ) { + // If overriding list of ores, check membership + if (opt.OPT_OB_INCLUDE.Count > 0 && !opt.OPT_OB_INCLUDE.Contains(blocks.data[index])) { + return; + } + + // Check for any excluded block + if (opt.OPT_OB_EXCLUDE.Contains(blocks.data[index])) { + return; + } + + blocks.data[index] = (byte)opt.OPT_ID; + + if (opt.OPT_VV) { + Console.WriteLine("Added block at {0},{1},{2}", x, y, z); + } + + if (opt.OPT_DATA != null) { + if (index % 2 == 0) { + data.data[index / 2] = (byte)((data.data[index / 2] & 0xF0) | (int)opt.OPT_DATA); + } + else { + data.data[index / 2] = (byte)((data.data[index / 2] & 0x0F) | ((int)opt.OPT_DATA << 4)); + } + } + } + } + + int BlockIndex (int x, int y, int z) { + return y + (z * 128 + x * 128 * 16); + } + } +} diff --git a/NBToolkit/NBToolkit/Program.cs b/NBToolkit/NBToolkit/Program.cs new file mode 100644 index 0000000..85e9ca7 --- /dev/null +++ b/NBToolkit/NBToolkit/Program.cs @@ -0,0 +1,110 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using 
System.Text; +using NBT; + +namespace NBToolkit +{ + class Program + { + static void Main (string[] args) + { + Harness harness = new Harness(); + + if (args.Length < 1) { + args = new string[1] { "" }; + } + + try { + if (args[0] == "oregen") { + OregenOptions options = new OregenOptions(args); + Oregen filter = new Oregen(options); + options.SetDefaults(); + harness.Run(options, filter); + } + else if (args[0] == "replace") { + ReplaceOptions options = new ReplaceOptions(args); + Replace filter = new Replace(options); + options.SetDefaults(); + harness.Run(options, filter); + } + else if (args[0] == "help") { + if (args.Length < 2) { + args = new string[2] { "help", "help" }; + } + + Console.WriteLine("Command: " + args[1]); + Console.WriteLine(); + + TKOptions options = null; + + if (args[1] == "oregen") { + options = new OregenOptions(args); + + WriteBlock("Generates one or more structured deposits of a single block type randomly within each chunk selected for update. The deposits are similar to natural ore deposits that appear in the world. Default values for depth, size, and number of rounds are provided for the true ores (coal, iron, gold, etc,). Other block types can be used as long as these values are specified in the command."); + Console.WriteLine(); + options.PrintUsage(); + } + else if (args[1] == "replace") { + options = new ReplaceOptions(args); + + WriteBlock("Replaces one block type with another. By default all matching blocks in the world will be replaced, but updates can be restricted by the available options. This command can be used to set a new data value on blocks by replacing a block with itself."); + Console.WriteLine(); + options.PrintUsage(); + } + else { + WriteBlock("Prints help and usage information for another command. Available commands are 'oregen' and 'replace'."); + Console.WriteLine(); + Console.WriteLine("Usage: nbtoolkit help "); + } + + return; + } + else { + TKOptions options = null; + try { + options = new TKOptions(args); + } + catch (TKOptionException) { } + + Console.WriteLine("Usage: nbtoolkit [options]"); + Console.WriteLine(); + Console.WriteLine("Available commands:"); + Console.WriteLine(" help Get help and usage info for another command"); + Console.WriteLine(" oregen Generate structured deposits of a single block type"); + Console.WriteLine(" replace Replace one block type with another"); + Console.WriteLine(); + options.PrintUsage(); + return; + } + } + catch (TKOptionException) { + return; + } + + Console.WriteLine("Done"); + } + + public static void WriteBlock (string str) + { + string buf = ""; + + while (str.Length > 78) { + for (int i = 78; i >= 0; i--) { + if (str[i] == ' ') { + if (buf.Length > 0) { + buf += " "; + } + buf += str.Substring(0, i) + "\n"; + str = str.Substring(i + 1); + break; + } + } + } + + buf += " " + str; + Console.WriteLine(buf); + } + } +} diff --git a/NBToolkit/NBToolkit/Properties/AssemblyInfo.cs b/NBToolkit/NBToolkit/Properties/AssemblyInfo.cs new file mode 100644 index 0000000..08af08c --- /dev/null +++ b/NBToolkit/NBToolkit/Properties/AssemblyInfo.cs @@ -0,0 +1,36 @@ +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. 
+[assembly: AssemblyTitle("NBToolkit")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("Microsoft")] +[assembly: AssemblyProduct("NBToolkit")] +[assembly: AssemblyCopyright("Copyright © Microsoft 2011")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. +[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("7eece4a1-8471-43dc-b3b8-fb6b2dc3773e")] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Build and Revision Numbers +// by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/NBToolkit/NBToolkit/RegionFile.cs b/NBToolkit/NBToolkit/RegionFile.cs new file mode 100644 index 0000000..c54c39b --- /dev/null +++ b/NBToolkit/NBToolkit/RegionFile.cs @@ -0,0 +1,376 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.IO; +using Ionic.Zlib; +using System.Collections; + +namespace NBToolkit +{ + public class RegionFile { + + private const int VERSION_GZIP = 1; + private const int VERSION_DEFLATE = 2; + + private const int SECTOR_BYTES = 4096; + private const int SECTOR_INTS = SECTOR_BYTES / 4; + + const int CHUNK_HEADER_SIZE = 5; + + private static byte[] emptySector = new byte[4096]; + + private string fileName; + private FileStream file; + private int[] offsets; + private int[] chunkTimestamps; + private List sectorFree; + private int sizeDelta; + private long lastModified = 0; + + public RegionFile(string path) { + offsets = new int[SECTOR_INTS]; + chunkTimestamps = new int[SECTOR_INTS]; + + fileName = path; + Debugln("REGION LOAD " + fileName); + + sizeDelta = 0; + + try { + if (File.Exists(path)) { + lastModified = Timestamp(File.GetLastWriteTime(path)); + } + + file = new FileStream(path, FileMode.OpenOrCreate, FileAccess.ReadWrite); // RandomAccessFile(path, "rw"); + + if (file.Length < SECTOR_BYTES) { + byte[] int0 = BitConverter.GetBytes((int)0); + + /* we need to write the chunk offset table */ + for (int i = 0; i < SECTOR_INTS; ++i) { + file.Write(int0, 0, 4); + } + // write another sector for the timestamp info + for (int i = 0; i < SECTOR_INTS; ++i) { + file.Write(int0, 0, 4); + } + + sizeDelta += SECTOR_BYTES * 2; + } + + if ((file.Length & 0xfff) != 0) { + /* the file size is not a multiple of 4KB, grow it */ + for (int i = 0; i < (file.Length & 0xfff); ++i) { + file.WriteByte(0); + } + } + + /* set up the available sector map */ + int nSectors = (int) file.Length / SECTOR_BYTES; + sectorFree = new List(nSectors); + + for (int i = 0; i < nSectors; ++i) { + sectorFree.Add(true); + } + + sectorFree[0] = false; // chunk offset table + sectorFree[1] = false; // for the last modified info + + file.Seek(0, SeekOrigin.Begin); + for (int i = 0; i < SECTOR_INTS; ++i) { + byte[] offsetBytes = new byte[4]; + file.Read(offsetBytes, 0, 4); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(offsetBytes); + } + int offset = BitConverter.ToInt32(offsetBytes, 0); + + offsets[i] = offset; + if (offset != 0 
&& (offset >> 8) + (offset & 0xFF) <= sectorFree.Count) { + for (int sectorNum = 0; sectorNum < (offset & 0xFF); ++sectorNum) { + sectorFree[(offset >> 8) + sectorNum] = false; + } + } + } + for (int i = 0; i < SECTOR_INTS; ++i) { + byte[] modBytes = new byte[4]; + file.Read(modBytes, 0, 4); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(modBytes); + } + int lastModValue = BitConverter.ToInt32(modBytes, 0); + + chunkTimestamps[i] = lastModValue; + } + } catch (IOException e) { + System.Console.WriteLine(e.StackTrace); + } + } + + /* the modification date of the region file when it was first opened */ + public long LastModified() { + return lastModified; + } + + /* gets how much the region file has grown since it was last checked */ + public int GetSizeDelta() { + int ret = sizeDelta; + sizeDelta = 0; + return ret; + } + + // various small debug printing helpers + private void Debug(String str) { + // System.Consle.Write(str); + } + + private void Debugln(String str) { + Debug(str + "\n"); + } + + private void Debug(String mode, int x, int z, String str) { + Debug("REGION " + mode + " " + fileName + "[" + x + "," + z + "] = " + str); + } + + private void Debug(String mode, int x, int z, int count, String str) { + Debug("REGION " + mode + " " + fileName + "[" + x + "," + z + "] " + count + "B = " + str); + } + + private void Debugln(String mode, int x, int z, String str) { + Debug(mode, x, z, str + "\n"); + } + + /* + * gets an (uncompressed) stream representing the chunk data returns null if + * the chunk is not found or an error occurs + */ + public Stream GetChunkDataInputStream(int x, int z) { + if (OutOfBounds(x, z)) { + Debugln("READ", x, z, "out of bounds"); + return null; + } + + try { + int offset = GetOffset(x, z); + if (offset == 0) { + // Debugln("READ", x, z, "miss"); + return null; + } + + int sectorNumber = offset >> 8; + int numSectors = offset & 0xFF; + + if (sectorNumber + numSectors > sectorFree.Count) { + Debugln("READ", x, z, "invalid sector"); + return null; + } + + file.Seek(sectorNumber * SECTOR_BYTES, SeekOrigin.Begin); + byte[] lengthBytes = new byte[4]; + file.Read(lengthBytes, 0, 4); + + if (BitConverter.IsLittleEndian) { + Array.Reverse(lengthBytes); + } + int length = BitConverter.ToInt32(lengthBytes, 0); + + if (length > SECTOR_BYTES * numSectors) { + Debugln("READ", x, z, "invalid length: " + length + " > 4096 * " + numSectors); + return null; + } + + byte version = (byte)file.ReadByte(); + if (version == VERSION_GZIP) { + byte[] data = new byte[length - 1]; + file.Read(data, 0, data.Length); + Stream ret = new GZipStream(new MemoryStream(data), CompressionMode.Decompress); + // Debug("READ", x, z, " = found"); + return ret; + } else if (version == VERSION_DEFLATE) { + byte[] data = new byte[length - 1]; + file.Read(data, 0, data.Length); + Stream ret = new ZlibStream(new MemoryStream(data), CompressionMode.Decompress, true); + // Debug("READ", x, z, " = found"); + return ret; + } + + Debugln("READ", x, z, "unknown version " + version); + return null; + } catch (IOException) { + Debugln("READ", x, z, "exception"); + return null; + } + } + + public Stream GetChunkDataOutputStream(int x, int z) { + if (OutOfBounds(x, z)) return null; + + return new ZlibStream(new ChunkBuffer(this, x, z), CompressionMode.Compress); + } + + /* + * lets chunk writing be multithreaded by not locking the whole file as a + * chunk is serializing -- only writes when serialization is over + */ + class ChunkBuffer : MemoryStream { + private int x, z; + private RegionFile region; 
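+            // Note on the write path (see GetChunkDataOutputStream above): callers receive
+            // a compressing ZlibStream wrapped around this buffer, so the compressed chunk
+            // bytes accumulate in memory and are handed to region.Write(x, z, ...) in a
+            // single call when the stream is closed. The region file is therefore never
+            // left holding a partially written chunk.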
+ + public ChunkBuffer(RegionFile r, int x, int z) : base(8096) { + // super(8096); // initialize to 8KB + this.region = r; + this.x = x; + this.z = z; + } + + public override void Close() { + region.Write(x, z, this.GetBuffer(), (int)this.Length); + } + } + + /* write a chunk at (x,z) with length bytes of data to disk */ + protected void Write(int x, int z, byte[] data, int length) { + try { + int offset = GetOffset(x, z); + int sectorNumber = offset >> 8; + int sectorsAllocated = offset & 0xFF; + int sectorsNeeded = (length + CHUNK_HEADER_SIZE) / SECTOR_BYTES + 1; + + // maximum chunk size is 1MB + if (sectorsNeeded >= 256) { + return; + } + + if (sectorNumber != 0 && sectorsAllocated == sectorsNeeded) { + /* we can simply overwrite the old sectors */ + Debug("SAVE", x, z, length, "rewrite"); + Write(sectorNumber, data, length); + } else { + /* we need to allocate new sectors */ + + /* mark the sectors previously used for this chunk as free */ + for (int i = 0; i < sectorsAllocated; ++i) { + sectorFree[sectorNumber + i] = true; + } + + /* scan for a free space large enough to store this chunk */ + int runStart = sectorFree.FindIndex(b => b == true); + int runLength = 0; + if (runStart != -1) { + for (int i = runStart; i < sectorFree.Count; ++i) { + if (runLength != 0) { + if (sectorFree[i]) runLength++; + else runLength = 0; + } else if (sectorFree[i]) { + runStart = i; + runLength = 1; + } + if (runLength >= sectorsNeeded) { + break; + } + } + } + + if (runLength >= sectorsNeeded) { + /* we found a free space large enough */ + Debug("SAVE", x, z, length, "reuse"); + sectorNumber = runStart; + SetOffset(x, z, (sectorNumber << 8) | sectorsNeeded); + for (int i = 0; i < sectorsNeeded; ++i) { + sectorFree[sectorNumber + i] = false; + } + Write(sectorNumber, data, length); + } else { + /* + * no free space large enough found -- we need to grow the + * file + */ + Debug("SAVE", x, z, length, "grow"); + file.Seek(0, SeekOrigin.End); + sectorNumber = sectorFree.Count; + for (int i = 0; i < sectorsNeeded; ++i) { + file.Write(emptySector, 0, emptySector.Length); + sectorFree.Add(false); + } + sizeDelta += SECTOR_BYTES * sectorsNeeded; + + Write(sectorNumber, data, length); + SetOffset(x, z, (sectorNumber << 8) | sectorsNeeded); + } + } + SetTimestamp(x, z, Timestamp()); + } catch (IOException e) { + Console.WriteLine(e.StackTrace); + } + } + + /* write a chunk data to the region file at specified sector number */ + private void Write(int sectorNumber, byte[] data, int length) { + Debugln(" " + sectorNumber); + file.Seek(sectorNumber * SECTOR_BYTES, SeekOrigin.Begin); + + byte[] bytes = BitConverter.GetBytes(length + 1); + if (BitConverter.IsLittleEndian) {; + Array.Reverse(bytes); + } + file.Write(bytes, 0, 4); // chunk length + file.WriteByte(VERSION_DEFLATE); // chunk version number + file.Write(data, 0, length); // chunk data + } + + /* is this an invalid chunk coordinate? 
*/ + private bool OutOfBounds(int x, int z) { + return x < 0 || x >= 32 || z < 0 || z >= 32; + } + + private int GetOffset(int x, int z) { + return offsets[x + z * 32]; + } + + public bool HasChunk(int x, int z) { + return GetOffset(x, z) != 0; + } + + private void SetOffset(int x, int z, int offset) { + offsets[x + z * 32] = offset; + file.Seek((x + z * 32) * 4, SeekOrigin.Begin); + + byte[] bytes = BitConverter.GetBytes(offset); + if (BitConverter.IsLittleEndian) {; + Array.Reverse(bytes); + } + + file.Write(bytes, 0, 4); + } + + private int Timestamp () { + DateTime epoch = new DateTime(1970, 1, 1, 0, 0, 0, 0); + return (int)((DateTime.UtcNow - epoch).Ticks / (10000L * 1000L)); + } + + private int Timestamp (DateTime time) + { + DateTime epoch = new DateTime(1970, 1, 1, 0, 0, 0, 0); + return (int)((time - epoch).Ticks / (10000L * 1000L)); + } + + private void SetTimestamp(int x, int z, int value) { + chunkTimestamps[x + z * 32] = value; + file.Seek(SECTOR_BYTES + (x + z * 32) * 4, SeekOrigin.Begin); + + byte[] bytes = BitConverter.GetBytes(value); + if (BitConverter.IsLittleEndian) {; + Array.Reverse(bytes); + } + + file.Write(bytes, 0, 4); + } + + public void Close() { + file.Close(); + } + } +} diff --git a/NBToolkit/NBToolkit/RegionFileCache.cs b/NBToolkit/NBToolkit/RegionFileCache.cs new file mode 100644 index 0000000..781b833 --- /dev/null +++ b/NBToolkit/NBToolkit/RegionFileCache.cs @@ -0,0 +1,97 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.IO; + +namespace NBToolkit +{ + public class RegionFileCache + { + private const int MAX_CACHE_SIZE = 256; + + private static Dictionary cache = new Dictionary(); + + private RegionFileCache() { + } + + public static RegionFile GetRegionFile(string basePath, string fileName) { + string regionDir = Path.Combine(basePath, "region"); + string file = Path.Combine(regionDir, fileName); + + RegionFile rf = null; + if (cache.ContainsKey(file)) { + rf = cache[file].Target as RegionFile; + } + + if (rf != null) { + return rf; + } + + if (!Directory.Exists(regionDir)) { + Directory.CreateDirectory(regionDir); + } + + if (cache.Count >= MAX_CACHE_SIZE) { + RegionFileCache.Clear(); + } + + RegionFile reg = new RegionFile(file); + cache.Add(file, new WeakReference(reg)); + return reg; + } + + public static RegionFile GetRegionFile (string basePath, int chunkX, int chunkZ) + { + string regionDir = Path.Combine(basePath, "region"); + string fileName = Path.Combine(regionDir, "r." + (chunkX >> 5) + "." 
+ (chunkZ >> 5) + ".mcr"); + + return GetRegionFile(basePath, fileName); + } + + public static string[] GetRegionFileList (string basePath) + { + string regionDir = Path.Combine(basePath, "region"); + + if (!Directory.Exists(regionDir)) { + Directory.CreateDirectory(regionDir); + } + + string[] files = Directory.GetFiles(regionDir); + List valid = new List(); + + foreach (string file in files) { + if (System.Text.RegularExpressions.Regex.IsMatch(file, "r\\.-?[0-9]+\\.-?[0-9]+\\.mcr$")) { + valid.Add(Path.GetFileName(file)); + } + } + + return valid.ToArray(); + } + + public static void Clear() { + foreach (WeakReference wr in cache.Values) { + RegionFile rf = wr.Target as RegionFile; + if (rf != null) { + rf.Close(); + } + } + cache.Clear(); + } + + public static int getSizeDelta(string basePath, int chunkX, int chunkZ) { + RegionFile r = GetRegionFile(basePath, chunkX, chunkZ); + return r.GetSizeDelta(); + } + + public static Stream getChunkDataInputStream(string basePath, int chunkX, int chunkZ) { + RegionFile r = GetRegionFile(basePath, chunkX, chunkZ); + return r.GetChunkDataInputStream(chunkX & 31, chunkZ & 31); + } + + public static Stream getChunkDataOutputStream(string basePath, int chunkX, int chunkZ) { + RegionFile r = GetRegionFile(basePath, chunkX, chunkZ); + return r.GetChunkDataOutputStream(chunkX & 31, chunkZ & 31); + } + } +} diff --git a/NBToolkit/NBToolkit/Replace.cs b/NBToolkit/NBToolkit/Replace.cs new file mode 100644 index 0000000..ac44b22 --- /dev/null +++ b/NBToolkit/NBToolkit/Replace.cs @@ -0,0 +1,141 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using NDesk.Options; +using NBT; + +namespace NBToolkit +{ + public class ReplaceOptions : TKOptions + { + private OptionSet filterOpt = null; + + public int? OPT_BEFORE = null; + public int? OPT_AFTER = null; + + public int? OPT_DATA = null; + public double? 
OPT_PROB = null; + + public ReplaceOptions (string[] args) : base(args) + { + filterOpt = new OptionSet() + { + { "b|before=", "Replace instances of block type {ID} with another block type", + v => OPT_BEFORE = Convert.ToInt32(v) % 256 }, + { "a|after=", "Replace the selected blocks with block type {ID}", + v => OPT_AFTER = Convert.ToInt32(v) % 256 }, + { "d|data=", "Set the new block's data value to {VAL} (0-15)", + v => OPT_DATA = Convert.ToInt32(v) % 16 }, + { "p|prob=", "Replace any matching block with probability {VAL} (0.0-1.0)", + v => { OPT_PROB = Convert.ToDouble(v); + OPT_PROB = Math.Max((double)OPT_PROB, 0.0); + OPT_PROB = Math.Min((double)OPT_PROB, 1.0); } }, + }; + + filterOpt.Parse(args); + } + + public override void PrintUsage () + { + Console.WriteLine("Usage: nbtoolkit replace -b -a [options]"); + Console.WriteLine(); + Console.WriteLine("Options for command 'replace':"); + + filterOpt.WriteOptionDescriptions(Console.Out); + + Console.WriteLine(); + base.PrintUsage(); + } + + public override void SetDefaults () + { + base.SetDefaults(); + + // Check for required parameters + if (OPT_BEFORE == null || OPT_AFTER == null) { + Console.WriteLine("Error: You must specify a before and after Block ID"); + Console.WriteLine(); + PrintUsage(); + + throw new TKOptionException(); + } + } + } + + class Replace : TKFilter + { + private ReplaceOptions opt; + + private static Random rand = new Random(); + + public Replace (ReplaceOptions o) + { + opt = o; + } + + public override void ApplyChunk (NBT_Tree root) + { + if (root == null || root.getRoot() == null) { + return; + } + + NBT_Tag level = root.getRoot().findTagByName("Level"); + if (level == null || level.type != NBT_Type.TAG_COMPOUND) { + return; + } + + NBT_Tag blocks = level.findTagByName("Blocks"); + if (blocks == null || blocks.type != NBT_Type.TAG_BYTE_ARRAY) { + return; + } + + NBT_Tag data = level.findTagByName("Data"); + if (data == null || data.type != NBT_Type.TAG_BYTE_ARRAY) { + return; + } + + NBT_ByteArray blocks_ary = blocks.value.toByteArray(); + NBT_ByteArray data_ary = data.value.toByteArray(); + + int ymin = 0; + int ymax = 127; + + for (int y = ymin; y <= ymax; y++) { + for (int x = 0; x < 16; x++) { + for (int z = 0; z < 16; z++) { + if (opt.OPT_PROB != null) { + double c = rand.NextDouble(); + if (c > opt.OPT_PROB) { + continue; + } + } + + int index = BlockIndex(x, y, z); + if (blocks_ary.data[index] == opt.OPT_BEFORE) { + blocks_ary.data[index] = (byte)opt.OPT_AFTER; + + if (opt.OPT_VV) { + Console.WriteLine("Replaced block at {0},{1},{2}", x, y, z); + } + + if (opt.OPT_DATA != null) { + if (index % 2 == 0) { + data_ary.data[index / 2] = (byte)((data_ary.data[index / 2] & 0xF0) | (int)opt.OPT_DATA); + } + else { + data_ary.data[index / 2] = (byte)((data_ary.data[index / 2] & 0x0F) | ((int)opt.OPT_DATA << 4)); + } + } + } + } + } + } + } + + int BlockIndex (int x, int y, int z) + { + return y + (z * 128 + x * 128 * 16); + } + } +} diff --git a/NBToolkit/NBToolkit/TKFilter.cs b/NBToolkit/NBToolkit/TKFilter.cs new file mode 100644 index 0000000..d024ade --- /dev/null +++ b/NBToolkit/NBToolkit/TKFilter.cs @@ -0,0 +1,13 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using NBT; + +namespace NBToolkit +{ + public abstract class TKFilter + { + public abstract void ApplyChunk (NBT_Tree root); + } +} diff --git a/NBToolkit/NBToolkit/TKOptions.cs b/NBToolkit/NBToolkit/TKOptions.cs new file mode 100644 index 0000000..87b497d --- /dev/null +++ 
b/NBToolkit/NBToolkit/TKOptions.cs @@ -0,0 +1,105 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using NDesk.Options; +using System.IO; + +namespace NBToolkit +{ + public class TKOptions + { + private OptionSet commonOpt = null; + + public string OPT_WORLD = ""; + + // Verbosity + public bool OPT_V = false; + public bool OPT_VV = false; + public bool OPT_HELP = false; + + // Block ID Include / Exclude conditions by chunk + public List OPT_CH_INCLUDE = new List(); + public List OPT_CH_EXCLUDE = new List(); + + // Chunk coordinate conditions + public int? CH_X_GE = null; + public int? CH_X_LE = null; + public int? CH_Z_GE = null; + public int? CH_Z_LE = null; + + // Block coordinate conditions + public int? BL_X_GE = null; + public int? BL_X_LE = null; + public int? BL_Z_GE = null; + public int? BL_Z_LE = null; + + public TKOptions (string[] args) + { + commonOpt = new OptionSet() + { + { "w|world=", "World directory", + v => OPT_WORLD = v }, + { "h|help", "Print this help message", + v => OPT_HELP = true }, + { "v", "Verbose output", + v => OPT_V = true }, + { "vv", "Very verbose output", + v => { OPT_V = true; OPT_VV = true; } }, + { "cxa=", "Update chunks with X-coord equal to or above {VAL}", + v => CH_X_GE = Convert.ToInt32(v) }, + { "cxb=", "Update chunks with X-coord equal to or below {VAL}", + v => CH_X_LE = Convert.ToInt32(v) }, + { "cza=", "Update chunks with Z-coord equal to or above {VAL}", + v => CH_Z_GE = Convert.ToInt32(v) }, + { "czb=", "Update chunks with Z-coord equal to or below {VAL}", + v => CH_Z_LE = Convert.ToInt32(v) }, + { "ci=", "Update chunks that contain an {ID} block", + v => OPT_CH_INCLUDE.Add(Convert.ToInt32(v) % 256) }, + { "cx=", "Update chunks that don't contain an {ID} block", + v => OPT_CH_EXCLUDE.Add(Convert.ToInt32(v) % 256) }, + }; + + commonOpt.Parse(args); + } + + public virtual void PrintUsage () + { + Console.WriteLine("Common Options:"); + commonOpt.WriteOptionDescriptions(Console.Out); + } + + public virtual void SetDefaults () + { + if (OPT_HELP) { + this.PrintUsage(); + throw new TKOptionException(); + } + + if (OPT_WORLD.Length == 0) { + Console.WriteLine("Error: You must specify a World path"); + Console.WriteLine(); + this.PrintUsage(); + + throw new TKOptionException(); + } + + if (!File.Exists(Path.Combine(OPT_WORLD, "Level.dat"))) { + Console.WriteLine("Error: The supplied world path is invalid"); + Console.WriteLine(); + this.PrintUsage(); + + throw new TKOptionException(); + } + } + } + + class TKOptionException : Exception + { + public TKOptionException () { } + + public TKOptionException (String msg) : base(msg) { } + + public TKOptionException (String msg, Exception innerException) : base(msg, innerException) { } + } +} diff --git a/NBToolkit/NBToolkit/Zlib/Crc32.cs b/NBToolkit/NBToolkit/Zlib/Crc32.cs new file mode 100644 index 0000000..a08bdfd --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/Crc32.cs @@ -0,0 +1,572 @@ +// Crc32.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2006-2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. 
+// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2010-January-16 13:16:27> +// +// ------------------------------------------------------------------ +// +// Implements the CRC algorithm, which is used in zip files. The zip format calls for +// the zipfile to contain a CRC for the unencrypted byte stream of each file. +// +// It is based on example source code published at +// http://www.vbaccelerator.com/home/net/code/libraries/CRC32/Crc32_zip_CRC32_CRC32_cs.asp +// +// This implementation adds a tweak of that code for use within zip creation. While +// computing the CRC we also compress the byte stream, in the same read loop. This +// avoids the need to read through the uncompressed stream twice - once to compute CRC +// and another time to compress. +// +// ------------------------------------------------------------------ + + + +using System; +using Interop=System.Runtime.InteropServices; + + +namespace Ionic.Zlib +{ + /// + /// Calculates a 32bit Cyclic Redundancy Checksum (CRC) using the same polynomial + /// used by Zip. This type is used internally by DotNetZip; it is generally not used + /// directly by applications wishing to create, read, or manipulate zip archive + /// files. + /// + + [Interop.GuidAttribute("ebc25cf6-9120-4283-b972-0e5520d0000C")] + [Interop.ComVisible(true)] +#if !NETCF + [Interop.ClassInterface(Interop.ClassInterfaceType.AutoDispatch)] +#endif + public class CRC32 + { + /// + /// indicates the total number of bytes read on the CRC stream. + /// This is used when writing the ZipDirEntry when compressing files. + /// + public Int64 TotalBytesRead + { + get + { + return _TotalBytesRead; + } + } + + /// + /// Indicates the current CRC for all blocks slurped in. + /// + public Int32 Crc32Result + { + get + { + // return one's complement of the running result + return unchecked((Int32)(~_RunningCrc32Result)); + } + } + + /// + /// Returns the CRC32 for the specified stream. + /// + /// The stream over which to calculate the CRC32 + /// the CRC32 calculation + public Int32 GetCrc32(System.IO.Stream input) + { + return GetCrc32AndCopy(input, null); + } + + /// + /// Returns the CRC32 for the specified stream, and writes the input into the + /// output stream. + /// + /// The stream over which to calculate the CRC32 + /// The stream into which to deflate the input + /// the CRC32 calculation + public Int32 GetCrc32AndCopy(System.IO.Stream input, System.IO.Stream output) + { + if (input == null) + throw new ZlibException("The input stream must not be null."); + + unchecked + { + //UInt32 crc32Result; + //crc32Result = 0xFFFFFFFF; + byte[] buffer = new byte[BUFFER_SIZE]; + int readSize = BUFFER_SIZE; + + _TotalBytesRead = 0; + int count = input.Read(buffer, 0, readSize); + if (output != null) output.Write(buffer, 0, count); + _TotalBytesRead += count; + while (count > 0) + { + SlurpBlock(buffer, 0, count); + count = input.Read(buffer, 0, readSize); + if (output != null) output.Write(buffer, 0, count); + _TotalBytesRead += count; + } + + return (Int32)(~_RunningCrc32Result); + } + } + + + /// + /// Get the CRC32 for the given (word,byte) combo. This is a computation + /// defined by PKzip. + /// + /// The word to start with. + /// The byte to combine it with. + /// The CRC-ized result. 
+ public Int32 ComputeCrc32(Int32 W, byte B) + { + return _InternalComputeCrc32((UInt32)W, B); + } + + internal Int32 _InternalComputeCrc32(UInt32 W, byte B) + { + return (Int32)(crc32Table[(W ^ B) & 0xFF] ^ (W >> 8)); + } + + /// + /// Update the value for the running CRC32 using the given block of bytes. + /// This is useful when using the CRC32() class in a Stream. + /// + /// block of bytes to slurp + /// starting point in the block + /// how many bytes within the block to slurp + public void SlurpBlock(byte[] block, int offset, int count) + { + if (block == null) + throw new ZlibException("The data buffer must not be null."); + + for (int i = 0; i < count; i++) + { + int x = offset + i; + _RunningCrc32Result = ((_RunningCrc32Result) >> 8) ^ crc32Table[(block[x]) ^ ((_RunningCrc32Result) & 0x000000FF)]; + } + _TotalBytesRead += count; + } + + + // pre-initialize the crc table for speed of lookup. + static CRC32() + { + unchecked + { + // PKZip specifies CRC32 with a polynomial of 0xEDB88320; + // This is also the CRC-32 polynomial used bby Ethernet, FDDI, + // bzip2, gzip, and others. + // Often the polynomial is shown reversed as 0x04C11DB7. + // For more details, see http://en.wikipedia.org/wiki/Cyclic_redundancy_check + UInt32 dwPolynomial = 0xEDB88320; + UInt32 i, j; + + crc32Table = new UInt32[256]; + + UInt32 dwCrc; + for (i = 0; i < 256; i++) + { + dwCrc = i; + for (j = 8; j > 0; j--) + { + if ((dwCrc & 1) == 1) + { + dwCrc = (dwCrc >> 1) ^ dwPolynomial; + } + else + { + dwCrc >>= 1; + } + } + crc32Table[i] = dwCrc; + } + } + } + + + + + private uint gf2_matrix_times(uint[] matrix, uint vec) + { + uint sum = 0; + int i=0; + while (vec != 0) + { + if ((vec & 0x01)== 0x01) + sum ^= matrix[i]; + vec >>= 1; + i++; + } + return sum; + } + + private void gf2_matrix_square(uint[] square, uint[] mat) + { + for (int i = 0; i < 32; i++) + square[i] = gf2_matrix_times(mat, mat[i]); + } + + + + /// + /// Combines the given CRC32 value with the current running total. + /// + /// + /// This is useful when using a divide-and-conquer approach to calculating a CRC. + /// Multiple threads can each calculate a CRC32 on a segment of the data, and then + /// combine the individual CRC32 values at the end. 
+ /// + /// the crc value to be combined with this one + /// the length of data the CRC value was calculated on + public void Combine(int crc, int length) + { + uint[] even = new uint[32]; // even-power-of-two zeros operator + uint[] odd = new uint[32]; // odd-power-of-two zeros operator + + if (length == 0) + return; + + uint crc1= ~_RunningCrc32Result; + uint crc2= (uint) crc; + + // put operator for one zero bit in odd + odd[0] = 0xEDB88320; // the CRC-32 polynomial + uint row = 1; + for (int i = 1; i < 32; i++) + { + odd[i] = row; + row <<= 1; + } + + // put operator for two zero bits in even + gf2_matrix_square(even, odd); + + // put operator for four zero bits in odd + gf2_matrix_square(odd, even); + + uint len2 = (uint) length; + + // apply len2 zeros to crc1 (first square will put the operator for one + // zero byte, eight zero bits, in even) + do { + // apply zeros operator for this bit of len2 + gf2_matrix_square(even, odd); + + if ((len2 & 1)== 1) + crc1 = gf2_matrix_times(even, crc1); + len2 >>= 1; + + if (len2 == 0) + break; + + // another iteration of the loop with odd and even swapped + gf2_matrix_square(odd, even); + if ((len2 & 1)==1) + crc1 = gf2_matrix_times(odd, crc1); + len2 >>= 1; + + + } while (len2 != 0); + + crc1 ^= crc2; + + _RunningCrc32Result= ~crc1; + + //return (int) crc1; + return; + } + + + + // private member vars + private Int64 _TotalBytesRead; + private static readonly UInt32[] crc32Table; + private const int BUFFER_SIZE = 8192; + private UInt32 _RunningCrc32Result = 0xFFFFFFFF; + + } + + + + + /// + /// A Stream that calculates a CRC32 (a checksum) on all bytes read, + /// or on all bytes written. + /// + /// + /// + /// + /// This class can be used to verify the CRC of a ZipEntry when + /// reading from a stream, or to calculate a CRC when writing to a + /// stream. The stream should be used to either read, or write, but + /// not both. If you intermix reads and writes, the results are not + /// defined. + /// + /// + /// + /// This class is intended primarily for use internally by the + /// DotNetZip library. + /// + /// + public class CrcCalculatorStream : System.IO.Stream, System.IDisposable + { + private static readonly Int64 UnsetLengthLimit = -99; + + internal System.IO.Stream _innerStream; + private CRC32 _Crc32; + private Int64 _lengthLimit = -99; + private bool _leaveOpen; + + /// + /// Gets the total number of bytes run through the CRC32 calculator. + /// + /// + /// + /// This is either the total number of bytes read, or the total number of bytes + /// written, depending on the direction of this stream. + /// + public Int64 TotalBytesSlurped + { + get { return _Crc32.TotalBytesRead; } + } + + + /// + /// The default constructor. + /// + /// + /// Instances returned from this constructor will leave the underlying stream + /// open upon Close(). + /// + /// The underlying stream + public CrcCalculatorStream(System.IO.Stream stream) + : this(true, CrcCalculatorStream.UnsetLengthLimit, stream) + { + } + + + /// + /// The constructor allows the caller to specify how to handle the underlying + /// stream at close. + /// + /// The underlying stream + /// true to leave the underlying stream + /// open upon close of the CrcCalculatorStream.; false otherwise. + public CrcCalculatorStream(System.IO.Stream stream, bool leaveOpen) + : this(leaveOpen, CrcCalculatorStream.UnsetLengthLimit, stream) + { + } + + + /// + /// A constructor allowing the specification of the length of the stream to read. 
+ /// + /// + /// Instances returned from this constructor will leave the underlying stream open + /// upon Close(). + /// + /// The underlying stream + /// The length of the stream to slurp + public CrcCalculatorStream(System.IO.Stream stream, Int64 length) + : this(true, length, stream) + { + if (length < 0) + throw new ArgumentException("length"); + } + + /// + /// A constructor allowing the specification of the length of the stream to + /// read, as well as whether to keep the underlying stream open upon Close(). + /// + /// The underlying stream + /// The length of the stream to slurp + /// true to leave the underlying stream + /// open upon close of the CrcCalculatorStream.; false otherwise. + public CrcCalculatorStream(System.IO.Stream stream, Int64 length, bool leaveOpen) + : this(leaveOpen, length, stream) + { + if (length < 0) + throw new ArgumentException("length"); + } + + + // This ctor is private - no validation is done here. This is to allow the use + // of a (specific) negative value for the _lengthLimit, to indicate that there + // is no length set. So we validate the length limit in those ctors that use an + // explicit param, otherwise we don't validate, because it could be our special + // value. + private CrcCalculatorStream(bool leaveOpen, Int64 length, System.IO.Stream stream) + : base() + { + _innerStream = stream; + _Crc32 = new CRC32(); + _lengthLimit = length; + _leaveOpen = leaveOpen; + } + + /// + /// Provides the current CRC for all blocks slurped in. + /// + public Int32 Crc + { + get { return _Crc32.Crc32Result; } + } + + /// + /// Indicates whether the underlying stream will be left open when the + /// CrcCalculatorStream is Closed. + /// + public bool LeaveOpen + { + get { return _leaveOpen; } + set { _leaveOpen = value; } + } + + /// + /// Read from the stream + /// + /// the buffer to read + /// the offset at which to start + /// the number of bytes to read + /// the number of bytes actually read + public override int Read(byte[] buffer, int offset, int count) + { + int bytesToRead = count; + + // Need to limit the # of bytes returned, if the stream is intended to have + // a definite length. This is especially useful when returning a stream for + // the uncompressed data directly to the application. The app won't + // necessarily read only the UncompressedSize number of bytes. For example + // wrapping the stream returned from OpenReader() into a StreadReader() and + // calling ReadToEnd() on it, We can "over-read" the zip data and get a + // corrupt string. The length limits that, prevents that problem. + + if (_lengthLimit != CrcCalculatorStream.UnsetLengthLimit) + { + if (_Crc32.TotalBytesRead >= _lengthLimit) return 0; // EOF + Int64 bytesRemaining = _lengthLimit - _Crc32.TotalBytesRead; + if (bytesRemaining < count) bytesToRead = (int)bytesRemaining; + } + int n = _innerStream.Read(buffer, offset, bytesToRead); + if (n > 0) _Crc32.SlurpBlock(buffer, offset, n); + return n; + } + + /// + /// Write to the stream. + /// + /// the buffer from which to write + /// the offset at which to start writing + /// the number of bytes to write + public override void Write(byte[] buffer, int offset, int count) + { + if (count > 0) _Crc32.SlurpBlock(buffer, offset, count); + _innerStream.Write(buffer, offset, count); + } + + /// + /// Indicates whether the stream supports reading. + /// + public override bool CanRead + { + get { return _innerStream.CanRead; } + } + + /// + /// Indicates whether the stream supports seeking. 
+ /// + public override bool CanSeek + { + get { return _innerStream.CanSeek; } + } + + /// + /// Indicates whether the stream supports writing. + /// + public override bool CanWrite + { + get { return _innerStream.CanWrite; } + } + + /// + /// Flush the stream. + /// + public override void Flush() + { + _innerStream.Flush(); + } + + /// + /// Not implemented. + /// + public override long Length + { + get + { + if (_lengthLimit == CrcCalculatorStream.UnsetLengthLimit) + return _innerStream.Length; + else return _lengthLimit; + } + } + + /// + /// Not implemented. + /// + public override long Position + { + get { return _Crc32.TotalBytesRead; } + set { throw new NotImplementedException(); } + } + + /// + /// Not implemented. + /// + /// N/A + /// N/A + /// N/A + public override long Seek(long offset, System.IO.SeekOrigin origin) + { + throw new NotImplementedException(); + } + + /// + /// Not implemented. + /// + /// N/A + public override void SetLength(long value) + { + throw new NotImplementedException(); + } + + + void IDisposable.Dispose() + { + Close(); + } + + /// + /// Closes the stream. + /// + public override void Close() + { + base.Close(); + if (!_leaveOpen) + _innerStream.Close(); + } + + } +} diff --git a/NBToolkit/NBToolkit/Zlib/Deflate.cs b/NBToolkit/NBToolkit/Zlib/Deflate.cs new file mode 100644 index 0000000..1b68932 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/Deflate.cs @@ -0,0 +1,1927 @@ +// Deflate.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-October-28 13:44:59> +// +// ------------------------------------------------------------------ +// +// This module defines logic for handling the Deflate or compression. +// +// This code is based on multiple sources: +// - the original zlib v1.2.3 source, which is Copyright (C) 1995-2005 Jean-loup Gailly. +// - the original jzlib, which is Copyright (c) 2000-2003 ymnk, JCraft,Inc. +// +// However, this code is significantly different from both. +// The object model is not the same, and many of the behaviors are different. +// +// In keeping with the license for these other works, the copyrights for +// jzlib and zlib are here. +// +// ----------------------------------------------------------------------- +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + + +using System; + +namespace Ionic.Zlib +{ + + /// + /// Describes how to flush the current deflate operation. + /// + /// + /// The different FlushType values are useful when using a Deflate in a streaming application. + /// + public enum FlushType + { + /// No flush at all. + None = 0, + + /// Closes the current block, but doesn't flush it to + /// the output. Used internally only in hypothetical + /// scenarios. This was supposed to be removed by Zlib, but it is + /// still in use in some edge cases. + /// + Partial, + + /// + /// Use this during compression to specify that all pending output should be + /// flushed to the output buffer and the output should be aligned on a byte + /// boundary. You might use this in a streaming communication scenario, so that + /// the decompressor can get all input data available so far. When using this + /// with a ZlibCodec, AvailableBytesIn will be zero after the call if + /// enough output space has been provided before the call. Flushing will + /// degrade compression and so it should be used only when necessary. + /// + Sync, + + /// + /// Use this during compression to specify that all output should be flushed, as + /// with FlushType.Sync, but also, the compression state should be reset + /// so that decompression can restart from this point if previous compressed + /// data has been damaged or if random access is desired. Using + /// FlushType.Full too often can significantly degrade the compression. + /// + Full, + + /// Signals the end of the compression/decompression stream. + Finish, + } + + internal enum BlockState + { + NeedMore = 0, // block not completed, need more input or more output + BlockDone, // block flush performed + FinishStarted, // finish started, need only more output at next deflate + FinishDone // finish done, accept no more input or output + } + + internal enum DeflateFlavor + { + Store, + Fast, + Slow + } + + internal sealed class DeflateManager + { + private static readonly int MEM_LEVEL_MAX = 9; + private static readonly int MEM_LEVEL_DEFAULT = 8; + + internal delegate BlockState CompressFunc(FlushType flush); + + internal class Config + { + // Use a faster search when the previous match is longer than this + internal int GoodLength; // reduce lazy search above this match length + + // Attempt to find a better match only when the current match is + // strictly smaller than this value. 
This mechanism is used only for + // compression levels >= 4. For levels 1,2,3: MaxLazy is actually + // MaxInsertLength. (See DeflateFast) + + internal int MaxLazy; // do not perform lazy search above this match length + + internal int NiceLength; // quit search above this match length + + // To speed up deflation, hash chains are never searched beyond this + // length. A higher limit improves compression ratio but degrades the speed. + + internal int MaxChainLength; + + internal DeflateFlavor Flavor; + + private Config(int goodLength, int maxLazy, int niceLength, int maxChainLength, DeflateFlavor flavor) + { + this.GoodLength = goodLength; + this.MaxLazy = maxLazy; + this.NiceLength = niceLength; + this.MaxChainLength = maxChainLength; + this.Flavor = flavor; + } + + public static Config Lookup(CompressionLevel level) + { + return Table[(int)level]; + } + + + static Config() + { + Table = new Config[] { + new Config(0, 0, 0, 0, DeflateFlavor.Store), + new Config(4, 4, 8, 4, DeflateFlavor.Fast), + new Config(4, 5, 16, 8, DeflateFlavor.Fast), + new Config(4, 6, 32, 32, DeflateFlavor.Fast), + + new Config(4, 4, 16, 16, DeflateFlavor.Slow), + new Config(8, 16, 32, 32, DeflateFlavor.Slow), + new Config(8, 16, 128, 128, DeflateFlavor.Slow), + new Config(8, 32, 128, 256, DeflateFlavor.Slow), + new Config(32, 128, 258, 1024, DeflateFlavor.Slow), + new Config(32, 258, 258, 4096, DeflateFlavor.Slow), + }; + } + + private static readonly Config[] Table; + } + + + private CompressFunc DeflateFunction; + + private static readonly System.String[] _ErrorMessage = new System.String[] + { + "need dictionary", + "stream end", + "", + "file error", + "stream error", + "data error", + "insufficient memory", + "buffer error", + "incompatible version", + "" + }; + + // preset dictionary flag in zlib header + private static readonly int PRESET_DICT = 0x20; + + private static readonly int INIT_STATE = 42; + private static readonly int BUSY_STATE = 113; + private static readonly int FINISH_STATE = 666; + + // The deflate compression method + private static readonly int Z_DEFLATED = 8; + + private static readonly int STORED_BLOCK = 0; + private static readonly int STATIC_TREES = 1; + private static readonly int DYN_TREES = 2; + + // The three kinds of block type + private static readonly int Z_BINARY = 0; + private static readonly int Z_ASCII = 1; + private static readonly int Z_UNKNOWN = 2; + + private static readonly int Buf_size = 8 * 2; + + private static readonly int MIN_MATCH = 3; + private static readonly int MAX_MATCH = 258; + + private static readonly int MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1); + + private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1); + + private static readonly int END_BLOCK = 256; + + internal ZlibCodec _codec; // the zlib encoder/decoder + internal int status; // as the name implies + internal byte[] pending; // output still pending - waiting to be compressed + internal int nextPending; // index of next pending byte to output to the stream + internal int pendingCount; // number of bytes in the pending buffer + + internal sbyte data_type; // UNKNOWN, BINARY or ASCII + internal int last_flush; // value of flush param for previous deflate call + + internal int w_size; // LZ77 window size (32K by default) + internal int w_bits; // log2(w_size) (8..16) + internal int w_mask; // w_size - 1 + + //internal byte[] dictionary; + internal byte[] window; + + // Sliding window. 
Input bytes are read into the second half of the window, + // and move to the first half later to keep a dictionary of at least wSize + // bytes. With this organization, matches are limited to a distance of + // wSize-MAX_MATCH bytes, but this ensures that IO is always + // performed with a length multiple of the block size. + // + // To do: use the user input buffer as sliding window. + + internal int window_size; + // Actual size of window: 2*wSize, except when the user input buffer + // is directly used as sliding window. + + internal short[] prev; + // Link to older string with same hash index. To limit the size of this + // array to 64K, this link is maintained only for the last 32K strings. + // An index in this array is thus a window index modulo 32K. + + internal short[] head; // Heads of the hash chains or NIL. + + internal int ins_h; // hash index of string to be inserted + internal int hash_size; // number of elements in hash table + internal int hash_bits; // log2(hash_size) + internal int hash_mask; // hash_size-1 + + // Number of bits by which ins_h must be shifted at each input + // step. It must be such that after MIN_MATCH steps, the oldest + // byte no longer takes part in the hash key, that is: + // hash_shift * MIN_MATCH >= hash_bits + internal int hash_shift; + + // Window position at the beginning of the current output block. Gets + // negative when the window is moved backwards. + + internal int block_start; + + Config config; + internal int match_length; // length of best match + internal int prev_match; // previous match + internal int match_available; // set if previous match exists + internal int strstart; // start of string to insert into.....???? + internal int match_start; // start of matching string + internal int lookahead; // number of valid bytes ahead in window + + // Length of the best match at previous step. Matches not greater than this + // are discarded. This is used in the lazy match evaluation. + internal int prev_length; + + // Insert new strings in the hash table only if the match length is not + // greater than this length. This saves time but degrades compression. + // max_insert_length is used only for compression levels <= 3. + + internal CompressionLevel compressionLevel; // compression level (1..9) + internal CompressionStrategy compressionStrategy; // favor or force Huffman coding + + + internal short[] dyn_ltree; // literal and length tree + internal short[] dyn_dtree; // distance tree + internal short[] bl_tree; // Huffman tree for bit lengths + + internal Tree treeLiterals = new Tree(); // desc for literal tree + internal Tree treeDistances = new Tree(); // desc for distance tree + internal Tree treeBitLengths = new Tree(); // desc for bit length tree + + // number of codes at each bit length for an optimal tree + internal short[] bl_count = new short[InternalConstants.MAX_BITS + 1]; + + // heap used to build the Huffman trees + internal int[] heap = new int[2 * InternalConstants.L_CODES + 1]; + + internal int heap_len; // number of elements in the heap + internal int heap_max; // element of largest frequency + + // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + // The same heap array is used to build all trees. + + // Depth of each subtree used as tie breaker for trees of equal frequency + internal sbyte[] depth = new sbyte[2 * InternalConstants.L_CODES + 1]; + + internal int _lengthOffset; // index for literals or lengths + + + // Size of match buffer for literals/lengths. 
There are 4 reasons for + // limiting lit_bufsize to 64K: + // - frequencies can be kept in 16 bit counters + // - if compression is not successful for the first block, all input + // data is still in the window so we can still emit a stored block even + // when input comes from standard input. (This can also be done for + // all blocks if lit_bufsize is not greater than 32K.) + // - if compression is not successful for a file smaller than 64K, we can + // even emit a stored file instead of a stored block (saving 5 bytes). + // This is applicable only for zip (not gzip or zlib). + // - creating new Huffman trees less frequently may not provide fast + // adaptation to changes in the input data statistics. (Take for + // example a binary file with poorly compressible code followed by + // a highly compressible string table.) Smaller buffer sizes give + // fast adaptation but have of course the overhead of transmitting + // trees more frequently. + + internal int lit_bufsize; + + internal int last_lit; // running index in l_buf + + // Buffer for distances. To simplify the code, d_buf and l_buf have + // the same number of elements. To use different lengths, an extra flag + // array would be necessary. + + internal int _distanceOffset; // index into pending; points to distance data?? + + internal int opt_len; // bit length of current block with optimal trees + internal int static_len; // bit length of current block with static trees + internal int matches; // number of string matches in current block + internal int last_eob_len; // bit length of EOB code for last block + + // Output buffer. bits are inserted starting at the bottom (least + // significant bits). + internal short bi_buf; + + // Number of valid bits in bi_buf. All bits above the last valid bit + // are always zero. + internal int bi_valid; + + + internal DeflateManager() + { + dyn_ltree = new short[HEAP_SIZE * 2]; + dyn_dtree = new short[(2 * InternalConstants.D_CODES + 1) * 2]; // distance tree + bl_tree = new short[(2 * InternalConstants.BL_CODES + 1) * 2]; // Huffman tree for bit lengths + } + + + // lm_init + private void _InitializeLazyMatch() + { + window_size = 2 * w_size; + + // clear the hash - workitem 9063 + Array.Clear(head, 0, hash_size); + //for (int i = 0; i < hash_size; i++) head[i] = 0; + + config = Config.Lookup(compressionLevel); + SetDeflater(); + + strstart = 0; + block_start = 0; + lookahead = 0; + match_length = prev_length = MIN_MATCH - 1; + match_available = 0; + ins_h = 0; + } + + // Initialize the tree data structures for a new zlib stream. + private void _InitializeTreeData() + { + treeLiterals.dyn_tree = dyn_ltree; + treeLiterals.staticTree = StaticTree.Literals; + + treeDistances.dyn_tree = dyn_dtree; + treeDistances.staticTree = StaticTree.Distances; + + treeBitLengths.dyn_tree = bl_tree; + treeBitLengths.staticTree = StaticTree.BitLengths; + + bi_buf = 0; + bi_valid = 0; + last_eob_len = 8; // enough lookahead for inflate + + // Initialize the first block of the first file: + _InitializeBlocks(); + } + + internal void _InitializeBlocks() + { + // Initialize the trees. 
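+ // Each tree array interleaves two 16-bit fields per symbol: [n*2] holds the
+ // frequency (re-used for the code once the tree is built) and [n*2+1] holds
+ // the code length, so only the frequency slots need to be cleared here.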
+ for (int i = 0; i < InternalConstants.L_CODES; i++) + dyn_ltree[i * 2] = 0; + for (int i = 0; i < InternalConstants.D_CODES; i++) + dyn_dtree[i * 2] = 0; + for (int i = 0; i < InternalConstants.BL_CODES; i++) + bl_tree[i * 2] = 0; + + dyn_ltree[END_BLOCK * 2] = 1; + opt_len = static_len = 0; + last_lit = matches = 0; + } + + // Restore the heap property by moving down the tree starting at node k, + // exchanging a node with the smallest of its two sons if necessary, stopping + // when the heap property is re-established (each father smaller than its + // two sons). + internal void pqdownheap(short[] tree, int k) + { + int v = heap[k]; + int j = k << 1; // left son of k + while (j <= heap_len) + { + // Set j to the smallest of the two sons: + if (j < heap_len && _IsSmaller(tree, heap[j + 1], heap[j], depth)) + { + j++; + } + // Exit if v is smaller than both sons + if (_IsSmaller(tree, v, heap[j], depth)) + break; + + // Exchange v with the smallest son + heap[k] = heap[j]; k = j; + // And continue down the tree, setting j to the left son of k + j <<= 1; + } + heap[k] = v; + } + + internal static bool _IsSmaller(short[] tree, int n, int m, sbyte[] depth) + { + short tn2 = tree[n * 2]; + short tm2 = tree[m * 2]; + return (tn2 < tm2 || (tn2 == tm2 && depth[n] <= depth[m])); + } + + + // Scan a literal or distance tree to determine the frequencies of the codes + // in the bit length tree. + internal void scan_tree(short[] tree, int max_code) + { + int n; // iterates over all tree elements + int prevlen = -1; // last emitted length + int curlen; // length of current code + int nextlen = (int)tree[0 * 2 + 1]; // length of next code + int count = 0; // repeat count of the current code + int max_count = 7; // max repeat count + int min_count = 4; // min repeat count + + if (nextlen == 0) + { + max_count = 138; min_count = 3; + } + tree[(max_code + 1) * 2 + 1] = (short)0x7fff; // guard //?? + + for (n = 0; n <= max_code; n++) + { + curlen = nextlen; nextlen = (int)tree[(n + 1) * 2 + 1]; + if (++count < max_count && curlen == nextlen) + { + continue; + } + else if (count < min_count) + { + bl_tree[curlen * 2] = (short)(bl_tree[curlen * 2] + count); + } + else if (curlen != 0) + { + if (curlen != prevlen) + bl_tree[curlen * 2]++; + bl_tree[InternalConstants.REP_3_6 * 2]++; + } + else if (count <= 10) + { + bl_tree[InternalConstants.REPZ_3_10 * 2]++; + } + else + { + bl_tree[InternalConstants.REPZ_11_138 * 2]++; + } + count = 0; prevlen = curlen; + if (nextlen == 0) + { + max_count = 138; min_count = 3; + } + else if (curlen == nextlen) + { + max_count = 6; min_count = 3; + } + else + { + max_count = 7; min_count = 4; + } + } + } + + // Construct the Huffman tree for the bit lengths and return the index in + // bl_order of the last bit length code to send. + internal int build_bl_tree() + { + int max_blindex; // index of last bit length code of non zero freq + + // Determine the bit length frequencies for literal and distance trees + scan_tree(dyn_ltree, treeLiterals.max_code); + scan_tree(dyn_dtree, treeDistances.max_code); + + // Build the bit length tree: + treeBitLengths.build_tree(this); + // opt_len now includes the length of the tree representations, except + // the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + + // Determine the number of bit length codes to send. The pkzip format + // requires that at least 4 bit length codes be sent. (appnote.txt says + // 3 but the actual value used is 4.) 
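+ // The scan below walks bl_order from the least common bit-length code toward
+ // the most common one and stops at the last code with a nonzero length (never
+ // below index 3), so between 4 and BL_CODES codes are sent, 3 bits each.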
+ for (max_blindex = InternalConstants.BL_CODES - 1; max_blindex >= 3; max_blindex--) + { + if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] != 0) + break; + } + // Update opt_len to include the bit length tree and counts + opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; + + return max_blindex; + } + + + // Send the header for a block using dynamic Huffman trees: the counts, the + // lengths of the bit length codes, the literal tree and the distance tree. + // IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. + internal void send_all_trees(int lcodes, int dcodes, int blcodes) + { + int rank; // index in bl_order + + send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt + send_bits(dcodes - 1, 5); + send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt + for (rank = 0; rank < blcodes; rank++) + { + send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3); + } + send_tree(dyn_ltree, lcodes - 1); // literal tree + send_tree(dyn_dtree, dcodes - 1); // distance tree + } + + // Send a literal or distance tree in compressed form, using the codes in + // bl_tree. + internal void send_tree(short[] tree, int max_code) + { + int n; // iterates over all tree elements + int prevlen = -1; // last emitted length + int curlen; // length of current code + int nextlen = tree[0 * 2 + 1]; // length of next code + int count = 0; // repeat count of the current code + int max_count = 7; // max repeat count + int min_count = 4; // min repeat count + + if (nextlen == 0) + { + max_count = 138; min_count = 3; + } + + for (n = 0; n <= max_code; n++) + { + curlen = nextlen; nextlen = tree[(n + 1) * 2 + 1]; + if (++count < max_count && curlen == nextlen) + { + continue; + } + else if (count < min_count) + { + do + { + send_code(curlen, bl_tree); + } + while (--count != 0); + } + else if (curlen != 0) + { + if (curlen != prevlen) + { + send_code(curlen, bl_tree); count--; + } + send_code(InternalConstants.REP_3_6, bl_tree); + send_bits(count - 3, 2); + } + else if (count <= 10) + { + send_code(InternalConstants.REPZ_3_10, bl_tree); + send_bits(count - 3, 3); + } + else + { + send_code(InternalConstants.REPZ_11_138, bl_tree); + send_bits(count - 11, 7); + } + count = 0; prevlen = curlen; + if (nextlen == 0) + { + max_count = 138; min_count = 3; + } + else if (curlen == nextlen) + { + max_count = 6; min_count = 3; + } + else + { + max_count = 7; min_count = 4; + } + } + } + + // Output a block of bytes on the stream. + // IN assertion: there is enough room in pending_buf. 
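+ // put_bytes appends 'len' raw bytes to the pending output buffer; callers
+ // such as copy_block() rely on the IN assertion above and do not re-check
+ // the remaining capacity of 'pending'.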
+ private void put_bytes(byte[] p, int start, int len) + { + Array.Copy(p, start, pending, pendingCount, len); + pendingCount += len; + } + +#if NOTNEEDED + private void put_byte(byte c) + { + pending[pendingCount++] = c; + } + internal void put_short(int b) + { + unchecked + { + pending[pendingCount++] = (byte)b; + pending[pendingCount++] = (byte)(b >> 8); + } + } + internal void putShortMSB(int b) + { + unchecked + { + pending[pendingCount++] = (byte)(b >> 8); + pending[pendingCount++] = (byte)b; + } + } +#endif + + internal void send_code(int c, short[] tree) + { + int c2 = c * 2; + send_bits((tree[c2] & 0xffff), (tree[c2 + 1] & 0xffff)); + } + + internal void send_bits(int value, int length) + { + int len = length; + unchecked + { + if (bi_valid > (int)Buf_size - len) + { + //int val = value; + // bi_buf |= (val << bi_valid); + + bi_buf |= (short)((value << bi_valid) & 0xffff); + //put_short(bi_buf); + pending[pendingCount++] = (byte)bi_buf; + pending[pendingCount++] = (byte)(bi_buf >> 8); + + + bi_buf = (short)((uint)value >> (Buf_size - bi_valid)); + bi_valid += len - Buf_size; + } + else + { + // bi_buf |= (value) << bi_valid; + bi_buf |= (short)((value << bi_valid) & 0xffff); + bi_valid += len; + } + } + } + + // Send one empty static block to give enough lookahead for inflate. + // This takes 10 bits, of which 7 may remain in the bit buffer. + // The current inflate code requires 9 bits of lookahead. If the + // last two codes for the previous block (real code plus EOB) were coded + // on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode + // the last real code. In this case we send two empty static blocks instead + // of one. (There are no problems if the previous block is stored or fixed.) + // To simplify the code, we assume the worst case of last real code encoded + // on one bit only. + internal void _tr_align() + { + send_bits(STATIC_TREES << 1, 3); + send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes); + + bi_flush(); + + // Of the 10 bits for the empty block, we have already sent + // (10 - bi_valid) bits. The lookahead for the last real code (before + // the EOB of the previous block) was thus at least one plus the length + // of the EOB plus what we have just sent of the empty static block. + if (1 + last_eob_len + 10 - bi_valid < 9) + { + send_bits(STATIC_TREES << 1, 3); + send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes); + bi_flush(); + } + last_eob_len = 7; + } + + + // Save the match info and tally the frequency counts. Return true if + // the current block must be flushed. 
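+ // Layout of the shared 'pending' buffer while tallying (see Initialize):
+ //   pending[_distanceOffset + last_lit*2]      high byte of the match distance
+ //   pending[_distanceOffset + last_lit*2 + 1]  low byte of the match distance
+ //   pending[_lengthOffset + last_lit]          literal byte, or match length - MIN_MATCH
+ // A distance of 0 marks an unmatched literal; e.g. a match of length 5 at
+ // distance 300 arrives as _tr_tally(300, 2) and stores 0x01, 0x2C and 2.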
+ internal bool _tr_tally(int dist, int lc) + { + pending[_distanceOffset + last_lit * 2] = unchecked((byte) ( (uint)dist >> 8 ) ); + pending[_distanceOffset + last_lit * 2 + 1] = unchecked((byte)dist); + pending[_lengthOffset + last_lit] = unchecked((byte)lc); + last_lit++; + + if (dist == 0) + { + // lc is the unmatched char + dyn_ltree[lc * 2]++; + } + else + { + matches++; + // Here, lc is the match length - MIN_MATCH + dist--; // dist = match distance - 1 + dyn_ltree[(Tree.LengthCode[lc] + InternalConstants.LITERALS + 1) * 2]++; + dyn_dtree[Tree.DistanceCode(dist) * 2]++; + } + + if ((last_lit & 0x1fff) == 0 && (int)compressionLevel > 2) + { + // Compute an upper bound for the compressed length + int out_length = last_lit << 3; + int in_length = strstart - block_start; + int dcode; + for (dcode = 0; dcode < InternalConstants.D_CODES; dcode++) + { + out_length = (int)(out_length + (int)dyn_dtree[dcode * 2] * (5L + Tree.ExtraDistanceBits[dcode])); + } + out_length >>= 3; + if ((matches < (last_lit / 2)) && out_length < in_length / 2) + return true; + } + + return (last_lit == lit_bufsize - 1) || (last_lit == lit_bufsize); + // dinoch - wraparound? + // We avoid equality with lit_bufsize because of wraparound at 64K + // on 16 bit machines and because stored blocks are restricted to + // 64K-1 bytes. + } + + + + // Send the block data compressed using the given Huffman trees + internal void send_compressed_block(short[] ltree, short[] dtree) + { + int distance; // distance of matched string + int lc; // match length or unmatched char (if dist == 0) + int lx = 0; // running index in l_buf + int code; // the code to send + int extra; // number of extra bits to send + + if (last_lit != 0) + { + do + { + int ix = _distanceOffset + lx * 2; + distance = ((pending[ix] << 8) & 0xff00) | + (pending[ix + 1] & 0xff); + lc = (pending[_lengthOffset + lx]) & 0xff; + lx++; + + if (distance == 0) + { + send_code(lc, ltree); // send a literal byte + } + else + { + // literal or match pair + // Here, lc is the match length - MIN_MATCH + code = Tree.LengthCode[lc]; + + // send the length code + send_code(code + InternalConstants.LITERALS + 1, ltree); + extra = Tree.ExtraLengthBits[code]; + if (extra != 0) + { + // send the extra length bits + lc -= Tree.LengthBase[code]; + send_bits(lc, extra); + } + distance--; // dist is now the match distance - 1 + code = Tree.DistanceCode(distance); + + // send the distance code + send_code(code, dtree); + + extra = Tree.ExtraDistanceBits[code]; + if (extra != 0) + { + // send the extra distance bits + distance -= Tree.DistanceBase[code]; + send_bits(distance, extra); + } + } + + // Check that the overlay between pending and d_buf+l_buf is ok: + } + while (lx < last_lit); + } + + send_code(END_BLOCK, ltree); + last_eob_len = ltree[END_BLOCK * 2 + 1]; + } + + + + // Set the data type to ASCII or BINARY, using a crude approximation: + // binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise. + // IN assertion: the fields freq of dyn_ltree are set and the total of all + // frequencies does not exceed 64K (to fit in an int on 16 bit machines). + internal void set_data_type() + { + int n = 0; + int ascii_freq = 0; + int bin_freq = 0; + while (n < 7) + { + bin_freq += dyn_ltree[n * 2]; n++; + } + while (n < 128) + { + ascii_freq += dyn_ltree[n * 2]; n++; + } + while (n < InternalConstants.LITERALS) + { + bin_freq += dyn_ltree[n * 2]; n++; + } + data_type = (sbyte)(bin_freq > (ascii_freq >> 2) ? 
Z_BINARY : Z_ASCII); + } + + + + // Flush the bit buffer, keeping at most 7 bits in it. + internal void bi_flush() + { + if (bi_valid == 16) + { + pending[pendingCount++] = (byte)bi_buf; + pending[pendingCount++] = (byte)(bi_buf >> 8); + bi_buf = 0; + bi_valid = 0; + } + else if (bi_valid >= 8) + { + //put_byte((byte)bi_buf); + pending[pendingCount++] = (byte)bi_buf; + bi_buf >>= 8; + bi_valid -= 8; + } + } + + // Flush the bit buffer and align the output on a byte boundary + internal void bi_windup() + { + if (bi_valid > 8) + { + pending[pendingCount++] = (byte)bi_buf; + pending[pendingCount++] = (byte)(bi_buf >> 8); + } + else if (bi_valid > 0) + { + //put_byte((byte)bi_buf); + pending[pendingCount++] = (byte)bi_buf; + } + bi_buf = 0; + bi_valid = 0; + } + + // Copy a stored block, storing first the length and its + // one's complement if requested. + internal void copy_block(int buf, int len, bool header) + { + bi_windup(); // align on byte boundary + last_eob_len = 8; // enough lookahead for inflate + + if (header) + unchecked + { + //put_short((short)len); + pending[pendingCount++] = (byte)len; + pending[pendingCount++] = (byte)(len >> 8); + //put_short((short)~len); + pending[pendingCount++] = (byte)~len; + pending[pendingCount++] = (byte)(~len >> 8); + } + + put_bytes(window, buf, len); + } + + internal void flush_block_only(bool eof) + { + _tr_flush_block(block_start >= 0 ? block_start : -1, strstart - block_start, eof); + block_start = strstart; + _codec.flush_pending(); + } + + // Copy without compression as much as possible from the input stream, return + // the current block state. + // This function does not insert new strings in the dictionary since + // uncompressible data is probably not useful. This function is used + // only for the level=0 compression option. + // NOTE: this function should be optimized to avoid extra copying from + // window to pending_buf. + internal BlockState DeflateNone(FlushType flush) + { + // Stored blocks are limited to 0xffff bytes, pending is limited + // to pending_buf_size, and each stored block has a 5 byte header: + + int max_block_size = 0xffff; + int max_start; + + if (max_block_size > pending.Length - 5) + { + max_block_size = pending.Length - 5; + } + + // Copy as much as possible from input to output: + while (true) + { + // Fill the window as much as possible: + if (lookahead <= 1) + { + _fillWindow(); + if (lookahead == 0 && flush == FlushType.None) + return BlockState.NeedMore; + if (lookahead == 0) + break; // flush the current block + } + + strstart += lookahead; + lookahead = 0; + + // Emit a stored block if pending will be full: + max_start = block_start + max_block_size; + if (strstart == 0 || strstart >= max_start) + { + // strstart == 0 is possible when wraparound on 16-bit machine + lookahead = (int)(strstart - max_start); + strstart = (int)max_start; + + flush_block_only(false); + if (_codec.AvailableBytesOut == 0) + return BlockState.NeedMore; + } + + // Flush if we may have to slide, otherwise block_start may become + // negative and the data will be gone: + if (strstart - block_start >= w_size - MIN_LOOKAHEAD) + { + flush_block_only(false); + if (_codec.AvailableBytesOut == 0) + return BlockState.NeedMore; + } + } + + flush_block_only(flush == FlushType.Finish); + if (_codec.AvailableBytesOut == 0) + return (flush == FlushType.Finish) ? BlockState.FinishStarted : BlockState.NeedMore; + + return flush == FlushType.Finish ? 
BlockState.FinishDone : BlockState.BlockDone; + } + + + // Send a stored block + internal void _tr_stored_block(int buf, int stored_len, bool eof) + { + send_bits((STORED_BLOCK << 1) + (eof ? 1 : 0), 3); // send block type + copy_block(buf, stored_len, true); // with header + } + + // Determine the best encoding for the current block: dynamic trees, static + // trees or store, and output the encoded block to the zip file. + internal void _tr_flush_block(int buf, int stored_len, bool eof) + { + int opt_lenb, static_lenb; // opt_len and static_len in bytes + int max_blindex = 0; // index of last bit length code of non zero freq + + // Build the Huffman trees unless a stored block is forced + if (compressionLevel > 0) + { + // Check if the file is ascii or binary + if (data_type == Z_UNKNOWN) + set_data_type(); + + // Construct the literal and distance trees + treeLiterals.build_tree(this); + + treeDistances.build_tree(this); + + // At this point, opt_len and static_len are the total bit lengths of + // the compressed block data, excluding the tree representations. + + // Build the bit length tree for the above two trees, and get the index + // in bl_order of the last bit length code to send. + max_blindex = build_bl_tree(); + + // Determine the best encoding. Compute first the block length in bytes + opt_lenb = (opt_len + 3 + 7) >> 3; + static_lenb = (static_len + 3 + 7) >> 3; + + if (static_lenb <= opt_lenb) + opt_lenb = static_lenb; + } + else + { + opt_lenb = static_lenb = stored_len + 5; // force a stored block + } + + if (stored_len + 4 <= opt_lenb && buf != -1) + { + // 4: two words for the lengths + // The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + // Otherwise we can't have processed more than WSIZE input bytes since + // the last block flush, because compression would have been + // successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + // transform a block into a stored block. + _tr_stored_block(buf, stored_len, eof); + } + else if (static_lenb == opt_lenb) + { + send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3); + send_compressed_block(StaticTree.lengthAndLiteralsTreeCodes, StaticTree.distTreeCodes); + } + else + { + send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3); + send_all_trees(treeLiterals.max_code + 1, treeDistances.max_code + 1, max_blindex + 1); + send_compressed_block(dyn_ltree, dyn_dtree); + } + + // The above check is made mod 2^32, for files larger than 512 MB + // and uLong implemented on 32 bits. + + _InitializeBlocks(); + + if (eof) + { + bi_windup(); + } + } + + // Fill the window when the lookahead becomes insufficient. + // Updates strstart and lookahead. + // + // IN assertion: lookahead < MIN_LOOKAHEAD + // OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + // At least one byte has been read, or avail_in == 0; reads are + // performed for at least two bytes (required for the zip translate_eol + // option -- not supported here). + private void _fillWindow() + { + int n, m; + int p; + int more; // Amount of free space at the end of the window. + + do + { + more = (window_size - lookahead - strstart); + + // Deal with !@#$% 64K limit: + if (more == 0 && strstart == 0 && lookahead == 0) + { + more = w_size; + } + else if (more == -1) + { + // Very unlikely, but possible on 16 bit machine if strstart == 0 + // and lookahead == 1 (input done one byte at time) + more--; + + // If the window is almost full and there is insufficient lookahead, + // move the upper half to the lower one to make room in the upper half. 
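+ // Note: the comment above describes the branch that follows, not this one.
+ // When strstart passes w_size + MAX_DIST (w_size + w_size - MIN_LOOKAHEAD),
+ // the upper half of 'window' is copied down and the head/prev hash chains
+ // are rebased by w_size, mirroring zlib's fill_window().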
+ } + else if (strstart >= w_size + w_size - MIN_LOOKAHEAD) + { + Array.Copy(window, w_size, window, 0, w_size); + match_start -= w_size; + strstart -= w_size; // we now have strstart >= MAX_DIST + block_start -= w_size; + + // Slide the hash table (could be avoided with 32 bit values + // at the expense of memory usage). We slide even when level == 0 + // to keep the hash table consistent if we switch back to level > 0 + // later. (Using level 0 permanently is not an optimal usage of + // zlib, so we don't care about this pathological case.) + + n = hash_size; + p = n; + do + { + m = (head[--p] & 0xffff); + head[p] = (short)((m >= w_size) ? (m - w_size) : 0); + } + while (--n != 0); + + n = w_size; + p = n; + do + { + m = (prev[--p] & 0xffff); + prev[p] = (short)((m >= w_size) ? (m - w_size) : 0); + // If n is not on any hash chain, prev[n] is garbage but + // its value will never be used. + } + while (--n != 0); + more += w_size; + } + + if (_codec.AvailableBytesIn == 0) + return; + + // If there was no sliding: + // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + // more == window_size - lookahead - strstart + // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + // => more >= window_size - 2*WSIZE + 2 + // In the BIG_MEM or MMAP case (not yet supported), + // window_size == input_size + MIN_LOOKAHEAD && + // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + // Otherwise, window_size == 2*WSIZE so more >= 2. + // If there was sliding, more >= WSIZE. So in all cases, more >= 2. + + n = _codec.read_buf(window, strstart + lookahead, more); + lookahead += n; + + // Initialize the hash value now that we have some input: + if (lookahead >= MIN_MATCH) + { + ins_h = window[strstart] & 0xff; + ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask; + } + // If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + // but this is not important since only literal bytes will be emitted. + } + while (lookahead < MIN_LOOKAHEAD && _codec.AvailableBytesIn != 0); + } + + // Compress as much as possible from the input stream, return the current + // block state. + // This function does not perform lazy evaluation of matches and inserts + // new strings in the dictionary only for unmatched strings or for short + // matches. It is used only for the fast compression options. + internal BlockState DeflateFast(FlushType flush) + { + // short hash_head = 0; // head of the hash chain + int hash_head = 0; // head of the hash chain + bool bflush; // set if current block must be flushed + + while (true) + { + // Make sure that we always have enough lookahead, except + // at the end of the input file. We need MAX_MATCH bytes + // for the next match, plus MIN_MATCH bytes to insert the + // string following the next match. + if (lookahead < MIN_LOOKAHEAD) + { + _fillWindow(); + if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None) + { + return BlockState.NeedMore; + } + if (lookahead == 0) + break; // flush the current block + } + + // Insert the string window[strstart .. strstart+2] in the + // dictionary, and set hash_head to the head of the hash chain: + if (lookahead >= MIN_MATCH) + { + ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask; + + // prev[strstart&w_mask]=hash_head=head[ins_h]; + hash_head = (head[ins_h] & 0xffff); + prev[strstart & w_mask] = head[ins_h]; + head[ins_h] = unchecked((short)strstart); + } + + // Find the longest match, discarding those <= prev_length. 
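+ // hash_head is the most recent window position whose first three bytes hash
+ // to the same slot; the masked distance test below keeps that candidate
+ // within the sliding window (at most w_size - MIN_LOOKAHEAD bytes back).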
+ // At this point we have always match_length < MIN_MATCH + + if (hash_head != 0L && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) + { + // To simplify the code, we prevent matches with the string + // of window index 0 (in particular we have to avoid a match + // of the string with itself at the start of the input file). + if (compressionStrategy != CompressionStrategy.HuffmanOnly) + { + match_length = longest_match(hash_head); + } + // longest_match() sets match_start + } + if (match_length >= MIN_MATCH) + { + // check_match(strstart, match_start, match_length); + + bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH); + + lookahead -= match_length; + + // Insert new strings in the hash table only if the match length + // is not too large. This saves time but degrades compression. + if (match_length <= config.MaxLazy && lookahead >= MIN_MATCH) + { + match_length--; // string at strstart already in hash table + do + { + strstart++; + + ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask; + // prev[strstart&w_mask]=hash_head=head[ins_h]; + hash_head = (head[ins_h] & 0xffff); + prev[strstart & w_mask] = head[ins_h]; + head[ins_h] = unchecked((short)strstart); + + // strstart never exceeds WSIZE-MAX_MATCH, so there are + // always MIN_MATCH bytes ahead. + } + while (--match_length != 0); + strstart++; + } + else + { + strstart += match_length; + match_length = 0; + ins_h = window[strstart] & 0xff; + + ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask; + // If lookahead < MIN_MATCH, ins_h is garbage, but it does not + // matter since it will be recomputed at next deflate call. + } + } + else + { + // No match, output a literal byte + + bflush = _tr_tally(0, window[strstart] & 0xff); + lookahead--; + strstart++; + } + if (bflush) + { + flush_block_only(false); + if (_codec.AvailableBytesOut == 0) + return BlockState.NeedMore; + } + } + + flush_block_only(flush == FlushType.Finish); + if (_codec.AvailableBytesOut == 0) + { + if (flush == FlushType.Finish) + return BlockState.FinishStarted; + else + return BlockState.NeedMore; + } + return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone; + } + + // Same as above, but achieves better compression. We use a lazy + // evaluation for matches: a match is finally adopted only if there is + // no better match at the next window position. + internal BlockState DeflateSlow(FlushType flush) + { + // short hash_head = 0; // head of hash chain + int hash_head = 0; // head of hash chain + bool bflush; // set if current block must be flushed + + // Process the input block. + while (true) + { + // Make sure that we always have enough lookahead, except + // at the end of the input file. We need MAX_MATCH bytes + // for the next match, plus MIN_MATCH bytes to insert the + // string following the next match. + + if (lookahead < MIN_LOOKAHEAD) + { + _fillWindow(); + if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None) + return BlockState.NeedMore; + + if (lookahead == 0) + break; // flush the current block + } + + // Insert the string window[strstart .. 
strstart+2] in the + // dictionary, and set hash_head to the head of the hash chain: + + if (lookahead >= MIN_MATCH) + { + ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask; + // prev[strstart&w_mask]=hash_head=head[ins_h]; + hash_head = (head[ins_h] & 0xffff); + prev[strstart & w_mask] = head[ins_h]; + head[ins_h] = unchecked((short)strstart); + } + + // Find the longest match, discarding those <= prev_length. + prev_length = match_length; + prev_match = match_start; + match_length = MIN_MATCH - 1; + + if (hash_head != 0 && prev_length < config.MaxLazy && + ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) + { + // To simplify the code, we prevent matches with the string + // of window index 0 (in particular we have to avoid a match + // of the string with itself at the start of the input file). + + if (compressionStrategy != CompressionStrategy.HuffmanOnly) + { + match_length = longest_match(hash_head); + } + // longest_match() sets match_start + + if (match_length <= 5 && (compressionStrategy == CompressionStrategy.Filtered || + (match_length == MIN_MATCH && strstart - match_start > 4096))) + { + + // If prev_match is also MIN_MATCH, match_start is garbage + // but we will ignore the current match anyway. + match_length = MIN_MATCH - 1; + } + } + + // If there was a match at the previous step and the current + // match is not better, output the previous match: + if (prev_length >= MIN_MATCH && match_length <= prev_length) + { + int max_insert = strstart + lookahead - MIN_MATCH; + // Do not insert strings in hash table beyond this. + + // check_match(strstart-1, prev_match, prev_length); + + bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH); + + // Insert in hash table all strings up to the end of the match. + // strstart-1 and strstart are already inserted. If there is not + // enough lookahead, the last two strings are not inserted in + // the hash table. + lookahead -= (prev_length - 1); + prev_length -= 2; + do + { + if (++strstart <= max_insert) + { + ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask; + //prev[strstart&w_mask]=hash_head=head[ins_h]; + hash_head = (head[ins_h] & 0xffff); + prev[strstart & w_mask] = head[ins_h]; + head[ins_h] = unchecked((short)strstart); + } + } + while (--prev_length != 0); + match_available = 0; + match_length = MIN_MATCH - 1; + strstart++; + + if (bflush) + { + flush_block_only(false); + if (_codec.AvailableBytesOut == 0) + return BlockState.NeedMore; + } + } + else if (match_available != 0) + { + + // If there was no match at the previous position, output a + // single literal. If there was a match but the current match + // is longer, truncate the previous match to a single literal. + + bflush = _tr_tally(0, window[strstart - 1] & 0xff); + + if (bflush) + { + flush_block_only(false); + } + strstart++; + lookahead--; + if (_codec.AvailableBytesOut == 0) + return BlockState.NeedMore; + } + else + { + // There is no previous match to compare with, wait for + // the next step to decide. + + match_available = 1; + strstart++; + lookahead--; + } + } + + if (match_available != 0) + { + bflush = _tr_tally(0, window[strstart - 1] & 0xff); + match_available = 0; + } + flush_block_only(flush == FlushType.Finish); + + if (_codec.AvailableBytesOut == 0) + { + if (flush == FlushType.Finish) + return BlockState.FinishStarted; + else + return BlockState.NeedMore; + } + + return flush == FlushType.Finish ? 
BlockState.FinishDone : BlockState.BlockDone; + } + + + internal int longest_match(int cur_match) + { + int chain_length = config.MaxChainLength; // max hash chain length + int scan = strstart; // current string + int match; // matched string + int len; // length of current match + int best_len = prev_length; // best match length so far + int limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0; + + int niceLength = config.NiceLength; + + // Stop when cur_match becomes <= limit. To simplify the code, + // we prevent matches with the string of window index 0. + + int wmask = w_mask; + + int strend = strstart + MAX_MATCH; + byte scan_end1 = window[scan + best_len - 1]; + byte scan_end = window[scan + best_len]; + + // The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + // It is easy to get rid of this optimization if necessary. + + // Do not waste too much time if we already have a good match: + if (prev_length >= config.GoodLength) + { + chain_length >>= 2; + } + + // Do not look for matches beyond the end of the input. This is necessary + // to make deflate deterministic. + if (niceLength > lookahead) + niceLength = lookahead; + + do + { + match = cur_match; + + // Skip to next match if the match length cannot increase + // or if the match length is less than 2: + if (window[match + best_len] != scan_end || + window[match + best_len - 1] != scan_end1 || + window[match] != window[scan] || + window[++match] != window[scan + 1]) + continue; + + // The check at best_len-1 can be removed because it will be made + // again later. (This heuristic is not always a win.) + // It is not necessary to compare scan[2] and match[2] since they + // are always equal when the other bytes match, given that + // the hash keys are equal and that HASH_BITS >= 8. + scan += 2; match++; + + // We check for insufficient lookahead only every 8th comparison; + // the 256th check will be made at strstart+258. 
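+ // The loop body below is intentionally empty: the unrolled condition compares
+ // up to eight byte pairs per iteration and stops on the first mismatch or
+ // when scan reaches strend (strstart + MAX_MATCH, i.e. a 258-byte match).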
+ do + { + } + while (window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && scan < strend); + + len = MAX_MATCH - (int)(strend - scan); + scan = strend - MAX_MATCH; + + if (len > best_len) + { + match_start = cur_match; + best_len = len; + if (len >= niceLength) + break; + scan_end1 = window[scan + best_len - 1]; + scan_end = window[scan + best_len]; + } + } + while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length != 0); + + if (best_len <= lookahead) + return best_len; + return lookahead; + } + + + private bool Rfc1950BytesEmitted = false; + private bool _WantRfc1950HeaderBytes = true; + internal bool WantRfc1950HeaderBytes + { + get { return _WantRfc1950HeaderBytes; } + set { _WantRfc1950HeaderBytes = value; } + } + + + internal int Initialize(ZlibCodec codec, CompressionLevel level) + { + return Initialize(codec, level, ZlibConstants.WindowBitsMax); + } + + internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits) + { + return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, CompressionStrategy.Default); + } + + internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits, CompressionStrategy compressionStrategy) + { + return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, compressionStrategy); + } + + internal int Initialize(ZlibCodec codec, CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy) + { + _codec = codec; + _codec.Message = null; + + // validation + if (windowBits < 9 || windowBits > 15) + throw new ZlibException("windowBits must be in the range 9..15."); + + if (memLevel < 1 || memLevel > MEM_LEVEL_MAX) + throw new ZlibException(String.Format("memLevel must be in the range 1.. {0}", MEM_LEVEL_MAX)); + + _codec.dstate = this; + + w_bits = windowBits; + w_size = 1 << w_bits; + w_mask = w_size - 1; + + hash_bits = memLevel + 7; + hash_size = 1 << hash_bits; + hash_mask = hash_size - 1; + hash_shift = ((hash_bits + MIN_MATCH - 1) / MIN_MATCH); + + window = new byte[w_size * 2]; + prev = new short[w_size]; + head = new short[hash_size]; + + // for memLevel==8, this will be 16384, 16k + lit_bufsize = 1 << (memLevel + 6); + + // Use a single array as the buffer for data pending compression, + // the output distance codes, and the output length codes (aka tree). + // orig comment: This works just fine since the average + // output size for (length,distance) codes is <= 24 bits. + pending = new byte[lit_bufsize * 4]; + _distanceOffset = lit_bufsize; + _lengthOffset = (1 + 2) * lit_bufsize; + + // So, for memLevel 8, the length of the pending buffer is 65536. 64k. + // The first 16k are pending bytes. + // The middle slice, of 32k, is used for distance codes. + // The final 16k are length codes. + + this.compressionLevel = level; + this.compressionStrategy = strategy; + + Reset(); + return ZlibConstants.Z_OK; + } + + + internal void Reset() + { + _codec.TotalBytesIn = _codec.TotalBytesOut = 0; + _codec.Message = null; + //strm.data_type = Z_UNKNOWN; + + pendingCount = 0; + nextPending = 0; + + Rfc1950BytesEmitted = false; + + status = (WantRfc1950HeaderBytes) ? 
INIT_STATE : BUSY_STATE; + _codec._Adler32 = Adler.Adler32(0, null, 0, 0); + + last_flush = (int)FlushType.None; + + _InitializeTreeData(); + _InitializeLazyMatch(); + } + + + internal int End() + { + if (status != INIT_STATE && status != BUSY_STATE && status != FINISH_STATE) + { + return ZlibConstants.Z_STREAM_ERROR; + } + // Deallocate in reverse order of allocations: + pending = null; + head = null; + prev = null; + window = null; + // free + // dstate=null; + return status == BUSY_STATE ? ZlibConstants.Z_DATA_ERROR : ZlibConstants.Z_OK; + } + + + private void SetDeflater() + { + switch (config.Flavor) + { + case DeflateFlavor.Store: + DeflateFunction = DeflateNone; + break; + case DeflateFlavor.Fast: + DeflateFunction = DeflateFast; + break; + case DeflateFlavor.Slow: + DeflateFunction = DeflateSlow; + break; + } + } + + + internal int SetParams(CompressionLevel level, CompressionStrategy strategy) + { + int result = ZlibConstants.Z_OK; + + if (compressionLevel != level) + { + Config newConfig = Config.Lookup(level); + + // change in the deflate flavor (Fast vs slow vs none)? + if (newConfig.Flavor != config.Flavor && _codec.TotalBytesIn != 0) + { + // Flush the last buffer: + result = _codec.Deflate(FlushType.Partial); + } + + compressionLevel = level; + config = newConfig; + SetDeflater(); + } + + // no need to flush with change in strategy? Really? + compressionStrategy = strategy; + + return result; + } + + + internal int SetDictionary(byte[] dictionary) + { + int length = dictionary.Length; + int index = 0; + + if (dictionary == null || status != INIT_STATE) + throw new ZlibException("Stream error."); + + _codec._Adler32 = Adler.Adler32(_codec._Adler32, dictionary, 0, dictionary.Length); + + if (length < MIN_MATCH) + return ZlibConstants.Z_OK; + if (length > w_size - MIN_LOOKAHEAD) + { + length = w_size - MIN_LOOKAHEAD; + index = dictionary.Length - length; // use the tail of the dictionary + } + Array.Copy(dictionary, index, window, 0, length); + strstart = length; + block_start = length; + + // Insert all strings in the hash table (except for the last two bytes). + // s->lookahead stays null, so s->ins_h will be recomputed at the next + // call of fill_window. + + ins_h = window[0] & 0xff; + ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask; + + for (int n = 0; n <= length - MIN_MATCH; n++) + { + ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask; + prev[n & w_mask] = head[ins_h]; + head[ins_h] = (short)n; + } + return ZlibConstants.Z_OK; + } + + + + internal int Deflate(FlushType flush) + { + int old_flush; + + if (_codec.OutputBuffer == null || + (_codec.InputBuffer == null && _codec.AvailableBytesIn != 0) || + (status == FINISH_STATE && flush != FlushType.Finish)) + { + _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_STREAM_ERROR)]; + throw new ZlibException(String.Format("Something is fishy. 
[{0}]", _codec.Message)); + //return ZlibConstants.Z_STREAM_ERROR; + } + if (_codec.AvailableBytesOut == 0) + { + _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)]; + throw new ZlibException("OutputBuffer is full (AvailableBytesOut == 0)"); + //return ZlibConstants.Z_BUF_ERROR; + } + + old_flush = last_flush; + last_flush = (int)flush; + + // Write the zlib (rfc1950) header bytes + if (status == INIT_STATE) + { + int header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8; + int level_flags = (((int)compressionLevel - 1) & 0xff) >> 1; + + if (level_flags > 3) + level_flags = 3; + header |= (level_flags << 6); + if (strstart != 0) + header |= PRESET_DICT; + header += 31 - (header % 31); + + status = BUSY_STATE; + //putShortMSB(header); + unchecked + { + pending[pendingCount++] = (byte)(header >> 8); + pending[pendingCount++] = (byte)header; + } + // Save the adler32 of the preset dictionary: + if (strstart != 0) + { + ////putShortMSB((int)(SharedUtils.URShift(_codec._Adler32, 16))); + //putShortMSB((int)((UInt64)_codec._Adler32 >> 16)); + //putShortMSB((int)(_codec._Adler32 & 0xffff)); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8); + pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF); + } + _codec._Adler32 = Adler.Adler32(0, null, 0, 0); + } + + // Flush as much pending output as possible + if (pendingCount != 0) + { + _codec.flush_pending(); + if (_codec.AvailableBytesOut == 0) + { + //System.out.println(" avail_out==0"); + // Since avail_out is 0, deflate will be called again with + // more output space, but possibly with both pending and + // avail_in equal to zero. There won't be anything to do, + // but this is not an error situation so make sure we + // return OK instead of BUF_ERROR at next call of deflate: + last_flush = -1; + return ZlibConstants.Z_OK; + } + + // Make sure there is something to do and avoid duplicate consecutive + // flushes. For repeated and useless calls with Z_FINISH, we keep + // returning Z_STREAM_END instead of Z_BUFF_ERROR. + } + else if (_codec.AvailableBytesIn == 0 && + (int)flush <= old_flush && + flush != FlushType.Finish) + { + // workitem 8557 + // Not sure why this needs to be an error. + // pendingCount == 0, which means there's nothing to deflate. + // And the caller has not asked for a FlushType.Finish, but... + // that seems very non-fatal. We can just say "OK" and do nthing. + + // _codec.Message = z_errmsg[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)]; + // throw new ZlibException("AvailableBytesIn == 0 && flush<=old_flush && flush != FlushType.Finish"); + + return ZlibConstants.Z_OK; + } + + // User must not provide more input after the first FINISH: + if (status == FINISH_STATE && _codec.AvailableBytesIn != 0) + { + _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)]; + throw new ZlibException("status == FINISH_STATE && _codec.AvailableBytesIn != 0"); + } + + + // Start a new block or continue the current one. 
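+ // DeflateFunction was bound in SetDeflater() according to the configured
+ // flavor: DeflateNone for Store (level 0), DeflateFast for levels 1-3, and
+ // DeflateSlow (lazy matching) for levels 4-9.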
+ if (_codec.AvailableBytesIn != 0 || lookahead != 0 || (flush != FlushType.None && status != FINISH_STATE)) + { + + BlockState bstate = DeflateFunction(flush); + + if (bstate == BlockState.FinishStarted || bstate == BlockState.FinishDone) + { + status = FINISH_STATE; + } + if (bstate == BlockState.NeedMore || bstate == BlockState.FinishStarted) + { + if (_codec.AvailableBytesOut == 0) + { + last_flush = -1; // avoid BUF_ERROR next call, see above + } + return ZlibConstants.Z_OK; + // If flush != Z_NO_FLUSH && avail_out == 0, the next call + // of deflate should use the same flush parameter to make sure + // that the flush is complete. So we don't have to output an + // empty block here, this will be done at next call. This also + // ensures that for a very small output buffer, we emit at most + // one empty block. + } + + if (bstate == BlockState.BlockDone) + { + if (flush == FlushType.Partial) + { + _tr_align(); + } + else + { + // FlushType.Full or FlushType.Sync + _tr_stored_block(0, 0, false); + // For a full flush, this empty block will be recognized + // as a special marker by inflate_sync(). + if (flush == FlushType.Full) + { + // clear hash (forget the history) + for (int i = 0; i < hash_size; i++) + head[i] = 0; + } + } + _codec.flush_pending(); + if (_codec.AvailableBytesOut == 0) + { + last_flush = -1; // avoid BUF_ERROR at next call, see above + return ZlibConstants.Z_OK; + } + } + } + + if (flush != FlushType.Finish) + return ZlibConstants.Z_OK; + + if (!WantRfc1950HeaderBytes || Rfc1950BytesEmitted) + return ZlibConstants.Z_STREAM_END; + + // Write the zlib trailer (adler32) + pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8); + pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF); + //putShortMSB((int)(SharedUtils.URShift(_codec._Adler32, 16))); + //putShortMSB((int)(_codec._Adler32 & 0xffff)); + + _codec.flush_pending(); + + // If avail_out is zero, the application will call deflate again + // to flush the rest. + + Rfc1950BytesEmitted = true; // write the trailer only once! + + return pendingCount != 0 ? ZlibConstants.Z_OK : ZlibConstants.Z_STREAM_END; + } + + } +} \ No newline at end of file diff --git a/NBToolkit/NBToolkit/Zlib/DeflateStream.cs b/NBToolkit/NBToolkit/Zlib/DeflateStream.cs new file mode 100644 index 0000000..66c7343 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/DeflateStream.cs @@ -0,0 +1,705 @@ +// DeflateStream.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-November-03 18:48:49> +// +// ------------------------------------------------------------------ +// +// This module defines the DeflateStream class, which can be used as a replacement for +// the System.IO.Compression.DeflateStream class in the .NET BCL. 
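+// A minimal usage sketch (illustrative only; the file names are placeholders,
+// not part of this module):
+//
+//   using (var input = System.IO.File.OpenRead("input.bin"))
+//   using (var output = System.IO.File.Create("output.deflated"))
+//   using (var compressor = new DeflateStream(output, CompressionMode.Compress))
+//   {
+//       byte[] buffer = new byte[4096];
+//       int n;
+//       while ((n = input.Read(buffer, 0, buffer.Length)) > 0)
+//           compressor.Write(buffer, 0, n);
+//   }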
+// +// ------------------------------------------------------------------ + + +using System; + +namespace Ionic.Zlib +{ + /// + /// A class for compressing and decompressing streams using the Deflate algorithm. + /// + /// + /// + /// + /// + /// The DeflateStream is a Decorator on a . It adds DEFLATE compression or decompression to any stream. + /// + /// + /// Using this stream, applications can compress or decompress data via + /// stream Read and Write operations. Either compresssion or + /// decompression can occur through either reading or writing. The compression + /// format used is DEFLATE, which is documented in IETF RFC 1951, "DEFLATE + /// Compressed Data Format Specification version 1.3.". + /// + /// + /// This class is similar to , except that ZlibStream + /// adds the RFC 1950 - ZLIB + /// framing bytes to a compressed stream when compressing, or expects the RFC1950 + /// framing bytes when decompressing. The DeflateStream does not. + /// + /// + /// + /// + /// + /// + public class DeflateStream : System.IO.Stream + { + internal ZlibBaseStream _baseStream; + internal System.IO.Stream _innerStream; + bool _disposed; + + /// + /// Create a DeflateStream using the specified CompressionMode. + /// + /// + /// When mode is CompressionMode.Compress, the DeflateStream + /// will use the default compression level. The "captive" stream will be closed + /// when the DeflateStream is closed. + /// + /// + /// This example uses a DeflateStream to compress data from a file, and writes + /// the compressed data to another file. + /// + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (var raw = System.IO.File.Create(fileToCompress + ".deflated")) + /// { + /// using (Stream compressor = new DeflateStream(raw, CompressionMode.Compress)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n; + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// compressor.Write(buffer, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using raw As FileStream = File.Create(fileToCompress & ".deflated") + /// Using compressor As Stream = New DeflateStream(raw, CompressionMode.Compress) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// End Using + /// + /// + /// The stream which will be read or written. + /// Indicates whether the DeflateStream will compress or decompress. + public DeflateStream(System.IO.Stream stream, CompressionMode mode) + : this(stream, mode, CompressionLevel.Default, false) + { + } + + /// + /// Create a DeflateStream using the specified CompressionMode and the specified CompressionLevel. + /// + /// + /// + /// + /// When mode is CompressionMode.Decompress, the level parameter + /// is ignored. The "captive" stream will be closed when the DeflateStream is + /// closed. + /// + /// + /// + /// + /// + /// This example uses a DeflateStream to compress data from a file, and writes + /// the compressed data to another file. 
+ /// + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (var raw = System.IO.File.Create(fileToCompress + ".deflated")) + /// { + /// using (Stream compressor = new DeflateStream(raw, + /// CompressionMode.Compress, + /// CompressionLevel.BestCompression)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n= -1; + /// while (n != 0) + /// { + /// if (n > 0) + /// compressor.Write(buffer, 0, n); + /// n= input.Read(buffer, 0, buffer.Length); + /// } + /// } + /// } + /// } + /// + /// + /// + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using raw As FileStream = File.Create(fileToCompress & ".deflated") + /// Using compressor As Stream = New DeflateStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// End Using + /// + /// + /// The stream to be read or written while deflating or inflating. + /// Indicates whether the DeflateStream will compress or decompress. + /// A tuning knob to trade speed for effectiveness. + public DeflateStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level) + : this(stream, mode, level, false) + { + } + + /// + /// Create a DeflateStream using the specified + /// CompressionMode, and explicitly specify whether the + /// stream should be left open after Deflation or Inflation. + /// + /// + /// + /// + /// + /// This constructor allows the application to request that the captive stream + /// remain open after the deflation or inflation occurs. By default, after + /// Close() is called on the stream, the captive stream is also + /// closed. In some cases this is not desired, for example if the stream is a + /// memory stream that will be re-read after compression. Specify true for + /// the parameter to leave the stream open. + /// + /// + /// + /// The DeflateStream will use the default compression level. + /// + /// + /// + /// See the other overloads of this constructor for example code. + /// + /// + /// + /// + /// The stream which will be read or written. This is called the + /// "captive" stream in other places in this documentation. + /// + /// + /// + /// Indicates whether the DeflateStream will compress or decompress. + /// + /// + /// true if the application would like the stream to + /// remain open after inflation/deflation. + public DeflateStream(System.IO.Stream stream, CompressionMode mode, bool leaveOpen) + : this(stream, mode, CompressionLevel.Default, leaveOpen) + { + } + + /// + /// Create a DeflateStream using the specified CompressionMode + /// and the specified CompressionLevel, and explicitly specify whether + /// the stream should be left open after Deflation or Inflation. + /// + /// + /// + /// + /// + /// When mode is CompressionMode.Decompress, the level parameter is ignored. + /// + /// + /// + /// This constructor allows the application to request that the captive stream + /// remain open after the deflation or inflation occurs. By default, after + /// Close() is called on the stream, the captive stream is also + /// closed. In some cases this is not desired, for example if the stream is a + /// that will be re-read after + /// compression. Specify true for the parameter + /// to leave the stream open. 
+ /// + /// + /// + /// + /// + /// + /// This example shows how to use a DeflateStream to compress data from + /// a file, and store the compressed data into another file. + /// + /// + /// using (var output = System.IO.File.Create(fileToCompress + ".deflated")) + /// { + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (Stream compressor = new DeflateStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, true)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n= -1; + /// while (n != 0) + /// { + /// if (n > 0) + /// compressor.Write(buffer, 0, n); + /// n= input.Read(buffer, 0, buffer.Length); + /// } + /// } + /// } + /// // can write additional data to the output stream here + /// } + /// + /// + /// + /// Using output As FileStream = File.Create(fileToCompress & ".deflated") + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using compressor As Stream = New DeflateStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, True) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// ' can write additional data to the output stream here. + /// End Using + /// + /// + /// The stream which will be read or written. + /// Indicates whether the DeflateStream will compress or decompress. + /// true if the application would like the stream to remain open after inflation/deflation. + /// A tuning knob to trade speed for effectiveness. + public DeflateStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen) + { + _innerStream = stream; + _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen); + } + + #region Zlib properties + + /// + /// This property sets the flush behavior on the stream. + /// + /// See the ZLIB documentation for the meaning of the flush behavior. + /// + virtual public FlushType FlushMode + { + get { return (this._baseStream._flushMode); } + set + { + if (_disposed) throw new ObjectDisposedException("DeflateStream"); + this._baseStream._flushMode = value; + } + } + + /// + /// The size of the working buffer for the compression codec. + /// + /// + /// + /// + /// The working buffer is used for all stream operations. The default size is + /// 1024 bytes. The minimum size is 128 bytes. You may get better performance + /// with a larger buffer. Then again, you might not. You would have to test + /// it. + /// + /// + /// + /// Set this before the first call to Read() or Write() on the + /// stream. If you try to set it afterwards, it will throw. + /// + /// + public int BufferSize + { + get + { + return this._baseStream._bufferSize; + } + set + { + if (_disposed) throw new ObjectDisposedException("DeflateStream"); + if (this._baseStream._workingBuffer != null) + throw new ZlibException("The working buffer is already set."); + if (value < ZlibConstants.WorkingBufferSizeMin) + throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin)); + this._baseStream._bufferSize = value; + } + } + + /// + /// The ZLIB strategy to be used during compression. + /// + /// + /// + /// By tweaking this parameter, you may be able to optimize the compression for + /// data with particular characteristics. 
+ /// + public CompressionStrategy Strategy + { + get + { + return this._baseStream.Strategy; + } + set + { + if (_disposed) throw new ObjectDisposedException("DeflateStream"); + this._baseStream.Strategy = value; + } + } + + /// Returns the total number of bytes input so far. + virtual public long TotalIn + { + get + { + return this._baseStream._z.TotalBytesIn; + } + } + + /// Returns the total number of bytes output so far. + virtual public long TotalOut + { + get + { + return this._baseStream._z.TotalBytesOut; + } + } + + #endregion + + #region System.IO.Stream methods + /// + /// Dispose the stream. + /// + /// + /// This may or may not result in a Close() call on the captive stream. + /// See the constructors that have a leaveOpen parameter for more information. + /// + protected override void Dispose(bool disposing) + { + try + { + if (!_disposed) + { + if (disposing && (this._baseStream != null)) + this._baseStream.Close(); + _disposed = true; + } + } + finally + { + base.Dispose(disposing); + } + } + + + + /// + /// Indicates whether the stream can be read. + /// + /// + /// The return value depends on whether the captive stream supports reading. + /// + public override bool CanRead + { + get + { + if (_disposed) throw new ObjectDisposedException("DeflateStream"); + return _baseStream._stream.CanRead; + } + } + + /// + /// Indicates whether the stream supports Seek operations. + /// + /// + /// Always returns false. + /// + public override bool CanSeek + { + get { return false; } + } + + + /// + /// Indicates whether the stream can be written. + /// + /// + /// The return value depends on whether the captive stream supports writing. + /// + public override bool CanWrite + { + get + { + if (_disposed) throw new ObjectDisposedException("DeflateStream"); + return _baseStream._stream.CanWrite; + } + } + + /// + /// Flush the stream. + /// + public override void Flush() + { + if (_disposed) throw new ObjectDisposedException("DeflateStream"); + _baseStream.Flush(); + } + + /// + /// Reading this property always throws a . + /// + public override long Length + { + get { throw new NotImplementedException(); } + } + + /// + /// The position of the stream pointer. + /// + /// + /// + /// Setting this property always throws a . Reading will return the total bytes + /// written out, if used in writing, or the total bytes read in, if used in + /// reading. The count may refer to compressed bytes or uncompressed bytes, + /// depending on how you've used the stream. + /// + public override long Position + { + get + { + if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Writer) + return this._baseStream._z.TotalBytesOut; + if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Reader) + return this._baseStream._z.TotalBytesIn; + return 0; + } + set { throw new NotImplementedException(); } + } + + /// + /// Read data from the stream. + /// + /// + /// + /// + /// If you wish to use the DeflateStream to compress data while + /// reading, you can create a DeflateStream with + /// CompressionMode.Compress, providing an uncompressed data stream. + /// Then call Read() on that DeflateStream, and the data read will be + /// compressed as you read. If you wish to use the DeflateStream to + /// decompress data while reading, you can create a DeflateStream with + /// CompressionMode.Decompress, providing a readable compressed data + /// stream. Then call Read() on that DeflateStream, and the data read + /// will be decompressed as you read. 
+ /// + /// + /// + /// A DeflateStream can be used for Read() or Write(), but not both. + /// + /// + /// + /// The buffer into which the read data should be placed. + /// the offset within that data array to put the first byte read. + /// the number of bytes to read. + /// the number of bytes actually read + public override int Read(byte[] buffer, int offset, int count) + { + if (_disposed) throw new ObjectDisposedException("DeflateStream"); + return _baseStream.Read(buffer, offset, count); + } + + + /// + /// Calling this method always throws a . + /// + /// this is irrelevant, since it will always throw! + /// this is irrelevant, since it will always throw! + /// irrelevant! + public override long Seek(long offset, System.IO.SeekOrigin origin) + { + throw new NotImplementedException(); + } + + /// + /// Calling this method always throws a . + /// + /// this is irrelevant, since it will always throw! + public override void SetLength(long value) + { + throw new NotImplementedException(); + } + + /// + /// Write data to the stream. + /// + /// + /// + /// + /// If you wish to use the DeflateStream to compress data while + /// writing, you can create a DeflateStream with + /// CompressionMode.Compress, and a writable output stream. Then call + /// Write() on that DeflateStream, providing uncompressed data + /// as input. The data sent to the output stream will be the compressed form + /// of the data written. If you wish to use the DeflateStream to + /// decompress data while writing, you can create a DeflateStream with + /// CompressionMode.Decompress, and a writable output stream. Then + /// call Write() on that stream, providing previously compressed + /// data. The data sent to the output stream will be the decompressed form of + /// the data written. + /// + /// + /// + /// A DeflateStream can be used for Read() or Write(), + /// but not both. + /// + /// + /// + /// + /// The buffer holding data to write to the stream. + /// the offset within that data array to find the first byte to write. + /// the number of bytes to write. + public override void Write(byte[] buffer, int offset, int count) + { + if (_disposed) throw new ObjectDisposedException("DeflateStream"); + _baseStream.Write(buffer, offset, count); + } + #endregion + + + + + /// + /// Compress a string into a byte array using DEFLATE. + /// + /// + /// + /// Uncompress it with . + /// + /// + /// + /// + /// + /// + /// A string to compress. The string will first be encoded + /// using UTF8, then compressed. + /// + /// + /// The string in compressed form + public static byte[] CompressString(String s) + { + using (var ms = new System.IO.MemoryStream()) + { + System.IO.Stream compressor = + new DeflateStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression); + ZlibBaseStream.CompressString(s, compressor); + return ms.ToArray(); + } + } + + + /// + /// Compress a byte array into a new byte array using DEFLATE. + /// + /// + /// + /// Uncompress it with . + /// + /// + /// + /// + /// + /// + /// A buffer to compress. + /// + /// + /// The data in compressed form + public static byte[] CompressBuffer(byte[] b) + { + using (var ms = new System.IO.MemoryStream()) + { + System.IO.Stream compressor = + new DeflateStream( ms, CompressionMode.Compress, CompressionLevel.BestCompression ); + + ZlibBaseStream.CompressBuffer(b, compressor); + return ms.ToArray(); + } + } + + + /// + /// Uncompress a DEFLATE'd byte array into a single string. + /// + /// + /// + /// + /// + /// + /// A buffer containing GZIP-compressed data. 
+ /// + /// + /// The uncompressed string + public static String UncompressString(byte[] compressed) + { + using (var input = new System.IO.MemoryStream(compressed)) + { + System.IO.Stream decompressor = + new DeflateStream(input, CompressionMode.Decompress); + + return ZlibBaseStream.UncompressString(compressed, decompressor); + } + } + + + /// + /// Uncompress a DEFLATE'd byte array into a byte array. + /// + /// + /// + /// + /// + /// + /// A buffer containing data that has been compressed with DEFLATE. + /// + /// + /// The data in uncompressed form + public static byte[] UncompressBuffer(byte[] compressed) + { + using (var input = new System.IO.MemoryStream(compressed)) + { + System.IO.Stream decompressor = + new DeflateStream( input, CompressionMode.Decompress ); + + return ZlibBaseStream.UncompressBuffer(compressed, decompressor); + } + } + + } + +} + diff --git a/NBToolkit/NBToolkit/Zlib/GZipStream.cs b/NBToolkit/NBToolkit/Zlib/GZipStream.cs new file mode 100644 index 0000000..559a758 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/GZipStream.cs @@ -0,0 +1,1013 @@ +// GZipStream.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2010-January-09 12:04:28> +// +// ------------------------------------------------------------------ +// +// This module defines the GZipStream class, which can be used as a replacement for +// the System.IO.Compression.GZipStream class in the .NET BCL. NB: The design is not +// completely OO clean: there is some intelligence in the ZlibBaseStream that reads the +// GZip header. +// +// ------------------------------------------------------------------ + + +using System; +using System.IO; + +namespace Ionic.Zlib +{ + /// + /// A class for compressing and decompressing GZIP streams. + /// + /// + /// + /// + /// The GZipStream is a Decorator on a + /// . It adds GZIP compression or decompression to any + /// stream. + /// + /// + /// + /// Like the System.IO.Compression.GZipStream in the .NET Base Class Library, the + /// Ionic.Zlib.GZipStream can compress while writing, or decompress while + /// reading, but not vice versa. The compression method used is GZIP, which is + /// documented in IETF RFC + /// 1952, "GZIP file format specification version 4.3". + /// + /// + /// A GZipStream can be used to decompress data (through Read()) or + /// to compress data (through Write()), but not both. + /// + /// + /// + /// If you wish to use the GZipStream to compress data, you must wrap it + /// around a write-able stream. As you call Write() on the GZipStream, the + /// data will be compressed into the GZIP format. If you want to decompress data, + /// you must wrap the GZipStream around a readable stream that contains an + /// IETF RFC 1952-compliant stream. The data will be decompressed as you call + /// Read() on the GZipStream. 
+ /// + /// + /// + /// Though the GZIP format allows data from multiple files to be concatenated + /// together, this stream handles only a single segment of GZIP format, typically + /// representing a single file. + /// + /// + /// + /// This class is similar to and . + /// ZlibStream handles RFC1950-compliant streams. + /// handles RFC1951-compliant streams. This class handles RFC1952-compliant streams. + /// + /// + /// + /// + /// + /// + public class GZipStream : System.IO.Stream + { + // GZip format + // source: http://tools.ietf.org/html/rfc1952 + // + // header id: 2 bytes 1F 8B + // compress method 1 byte 8= DEFLATE (none other supported) + // flag 1 byte bitfield (See below) + // mtime 4 bytes time_t (seconds since jan 1, 1970 UTC of the file. + // xflg 1 byte 2 = max compress used , 4 = max speed (can be ignored) + // OS 1 byte OS for originating archive. set to 0xFF in compression. + // extra field length 2 bytes optional - only if FEXTRA is set. + // extra field varies + // filename varies optional - if FNAME is set. zero terminated. ISO-8859-1. + // file comment varies optional - if FCOMMENT is set. zero terminated. ISO-8859-1. + // crc16 1 byte optional - present only if FHCRC bit is set + // compressed data varies + // CRC32 4 bytes + // isize 4 bytes data size modulo 2^32 + // + // FLG (FLaGs) + // bit 0 FTEXT - indicates file is ASCII text (can be safely ignored) + // bit 1 FHCRC - there is a CRC16 for the header immediately following the header + // bit 2 FEXTRA - extra fields are present + // bit 3 FNAME - the zero-terminated filename is present. encoding; ISO-8859-1. + // bit 4 FCOMMENT - a zero-terminated file comment is present. encoding: ISO-8859-1 + // bit 5 reserved + // bit 6 reserved + // bit 7 reserved + // + // On consumption: + // Extra field is a bunch of nonsense and can be safely ignored. + // Header CRC and OS, likewise. + // + // on generation: + // all optional fields get 0, except for the OS, which gets 255. + // + + + + /// + /// The comment on the GZIP stream. + /// + /// + /// + /// + /// The GZIP format allows for each file to optionally have an associated + /// comment stored with the file. The comment is encoded with the ISO-8859-1 + /// code page. To include a comment in a GZIP stream you create, set this + /// property before calling Write() for the first time on the + /// GZipStream. + /// + /// + /// + /// When using GZipStream to decompress, you can retrieve this property + /// after the first call to Read(). If no comment has been set in the + /// GZIP bytestream, the Comment property will return null + /// (Nothing in VB). + /// + /// + public String Comment + { + get + { + return _Comment; + } + set + { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + _Comment = value; + } + } + + /// + /// The FileName for the GZIP stream. + /// + /// + /// + /// + /// + /// The GZIP format optionally allows each file to have an associated + /// filename. When compressing data (through Write()), set this + /// FileName before calling Write() the first time on the GZipStream. + /// The actual filename is encoded into the GZIP bytestream with the + /// ISO-8859-1 code page, according to RFC 1952. It is the application's + /// responsibility to insure that the FileName can be encoded and decoded + /// correctly with this code page. + /// + /// + /// + /// When decompressing (through Read()), you can retrieve this value + /// any time after the first Read(). 
In the case where there was no filename + /// encoded into the GZIP bytestream, the property will return null (Nothing + /// in VB). + /// + /// + public String FileName + { + get { return _FileName; } + set + { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + _FileName = value; + if (_FileName == null) return; + if (_FileName.IndexOf("/") != -1) + { + _FileName = _FileName.Replace("/", "\\"); + } + if (_FileName.EndsWith("\\")) + throw new Exception("Illegal filename"); + if (_FileName.IndexOf("\\") != -1) + { + // trim any leading path + _FileName = Path.GetFileName(_FileName); + } + } + } + + /// + /// The last modified time for the GZIP stream. + /// + /// + /// + /// GZIP allows the storage of a last modified time with each GZIP entry. + /// When compressing data, you can set this before the first call to + /// Write(). When decompressing, you can retrieve this value any time + /// after the first call to Read(). + /// + public DateTime? LastModified; + + /// + /// The CRC on the GZIP stream. + /// + /// + /// This is used for internal error checking. You probably don't need to look at this property. + /// + public int Crc32 { get { return _Crc32; } } + + private int _headerByteCount; + internal ZlibBaseStream _baseStream; + bool _disposed; + bool _firstReadDone; + string _FileName; + string _Comment; + int _Crc32; + + + /// + /// Create a GZipStream using the specified CompressionMode. + /// + /// + /// + /// + /// When mode is CompressionMode.Compress, the GZipStream will use the + /// default compression level. + /// + /// + /// + /// As noted in the class documentation, the CompressionMode (Compress + /// or Decompress) also establishes the "direction" of the stream. A + /// GZipStream with CompressionMode.Compress works only through + /// Write(). A GZipStream with + /// CompressionMode.Decompress works only through Read(). + /// + /// + /// + /// + /// + /// This example shows how to use a GZipStream to compress data. + /// + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (var raw = System.IO.File.Create(outputFile)) + /// { + /// using (Stream compressor = new GZipStream(raw, CompressionMode.Compress)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n; + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// compressor.Write(buffer, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// Dim outputFile As String = (fileToCompress & ".compressed") + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using raw As FileStream = File.Create(outputFile) + /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// End Using + /// + /// + /// + /// + /// This example shows how to use a GZipStream to uncompress a file. 
+ /// + /// private void GunZipFile(string filename) + /// { + /// if (!filename.EndsWith(".gz)) + /// throw new ArgumentException("filename"); + /// var DecompressedFile = filename.Substring(0,filename.Length-3); + /// byte[] working = new byte[WORKING_BUFFER_SIZE]; + /// int n= 1; + /// using (System.IO.Stream input = System.IO.File.OpenRead(filename)) + /// { + /// using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true)) + /// { + /// using (var output = System.IO.File.Create(DecompressedFile)) + /// { + /// while (n !=0) + /// { + /// n= decompressor.Read(working, 0, working.Length); + /// if (n > 0) + /// { + /// output.Write(working, 0, n); + /// } + /// } + /// } + /// } + /// } + /// } + /// + /// + /// + /// Private Sub GunZipFile(ByVal filename as String) + /// If Not (filename.EndsWith(".gz)) Then + /// Throw New ArgumentException("filename") + /// End If + /// Dim DecompressedFile as String = filename.Substring(0,filename.Length-3) + /// Dim working(WORKING_BUFFER_SIZE) as Byte + /// Dim n As Integer = 1 + /// Using input As Stream = File.OpenRead(filename) + /// Using decompressor As Stream = new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, True) + /// Using output As Stream = File.Create(UncompressedFile) + /// Do + /// n= decompressor.Read(working, 0, working.Length) + /// If n > 0 Then + /// output.Write(working, 0, n) + /// End IF + /// Loop While (n > 0) + /// End Using + /// End Using + /// End Using + /// End Sub + /// + /// + /// + /// The stream which will be read or written. + /// Indicates whether the GZipStream will compress or decompress. + public GZipStream(Stream stream, CompressionMode mode) + : this(stream, mode, CompressionLevel.Default, false) + { + } + + /// + /// Create a GZipStream using the specified CompressionMode and + /// the specified CompressionLevel. + /// + /// + /// + /// + /// The CompressionMode (Compress or Decompress) also establishes the + /// "direction" of the stream. A GZipStream with + /// CompressionMode.Compress works only through Write(). A + /// GZipStream with CompressionMode.Decompress works only + /// through Read(). + /// + /// + /// + /// + /// + /// + /// This example shows how to use a GZipStream to compress a file into a .gz file. + /// + /// + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (var raw = System.IO.File.Create(fileToCompress + ".gz")) + /// { + /// using (Stream compressor = new GZipStream(raw, + /// CompressionMode.Compress, + /// CompressionLevel.BestCompression)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n; + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// compressor.Write(buffer, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using raw As FileStream = File.Create(fileToCompress & ".gz") + /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// End Using + /// + /// + /// The stream to be read or written while deflating or inflating. + /// Indicates whether the GZipStream will compress or decompress. + /// A tuning knob to trade speed for effectiveness. 
+ public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level) + : this(stream, mode, level, false) + { + } + + /// + /// Create a GZipStream using the specified CompressionMode, and + /// explicitly specify whether the stream should be left open after Deflation + /// or Inflation. + /// + /// + /// + /// + /// This constructor allows the application to request that the captive stream + /// remain open after the deflation or inflation occurs. By default, after + /// Close() is called on the stream, the captive stream is also + /// closed. In some cases this is not desired, for example if the stream is a + /// memory stream that will be re-read after compressed data has been written + /// to it. Specify true for the parameter to leave + /// the stream open. + /// + /// + /// + /// The (Compress or Decompress) also + /// establishes the "direction" of the stream. A GZipStream with + /// CompressionMode.Compress works only through Write(). A GZipStream + /// with CompressionMode.Decompress works only through Read(). + /// + /// + /// + /// The GZipStream will use the default compression level. If you want + /// to specify the compression level, see . + /// + /// + /// + /// See the other overloads of this constructor for example code. + /// + /// + /// + /// + /// + /// The stream which will be read or written. This is called the "captive" + /// stream in other places in this documentation. + /// + /// + /// Indicates whether the GZipStream will compress or decompress. + /// + /// + /// + /// true if the application would like the base stream to remain open after + /// inflation/deflation. + /// + public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen) + : this(stream, mode, CompressionLevel.Default, leaveOpen) + { + } + + /// + /// Create a GZipStream using the specified CompressionMode and the + /// specified CompressionLevel, and explicitly specify whether the + /// stream should be left open after Deflation or Inflation. + /// + /// + /// + /// + /// + /// This constructor allows the application to request that the captive stream + /// remain open after the deflation or inflation occurs. By default, after + /// Close() is called on the stream, the captive stream is also + /// closed. In some cases this is not desired, for example if the stream is a + /// memory stream that will be re-read after compressed data has been written + /// to it. Specify true for the parameter to + /// leave the stream open. + /// + /// + /// + /// As noted in the class documentation, the CompressionMode (Compress + /// or Decompress) also establishes the "direction" of the stream. A + /// GZipStream with CompressionMode.Compress works only through + /// Write(). A GZipStream with CompressionMode.Decompress works only + /// through Read(). + /// + /// + /// + /// + /// + /// This example shows how to use a GZipStream to compress data. 
+ /// + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (var raw = System.IO.File.Create(outputFile)) + /// { + /// using (Stream compressor = new GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression, true)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n; + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// compressor.Write(buffer, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// Dim outputFile As String = (fileToCompress & ".compressed") + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using raw As FileStream = File.Create(outputFile) + /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression, True) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// End Using + /// + /// + /// The stream which will be read or written. + /// Indicates whether the GZipStream will compress or decompress. + /// true if the application would like the stream to remain open after inflation/deflation. + /// A tuning knob to trade speed for effectiveness. + public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen) + { + _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen); + } + + #region Zlib properties + + /// + /// This property sets the flush behavior on the stream. + /// + virtual public FlushType FlushMode + { + get { return (this._baseStream._flushMode); } + set { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + this._baseStream._flushMode = value; + } + } + + /// + /// The size of the working buffer for the compression codec. + /// + /// + /// + /// + /// The working buffer is used for all stream operations. The default size is + /// 1024 bytes. The minimum size is 128 bytes. You may get better performance + /// with a larger buffer. Then again, you might not. You would have to test + /// it. + /// + /// + /// + /// Set this before the first call to Read() or Write() on the + /// stream. If you try to set it afterwards, it will throw. + /// + /// + public int BufferSize + { + get + { + return this._baseStream._bufferSize; + } + set + { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + if (this._baseStream._workingBuffer != null) + throw new ZlibException("The working buffer is already set."); + if (value < ZlibConstants.WorkingBufferSizeMin) + throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin)); + this._baseStream._bufferSize = value; + } + } + + + /// Returns the total number of bytes input so far. + virtual public long TotalIn + { + get + { + return this._baseStream._z.TotalBytesIn; + } + } + + /// Returns the total number of bytes output so far. + virtual public long TotalOut + { + get + { + return this._baseStream._z.TotalBytesOut; + } + } + + #endregion + + #region Stream methods + + /// + /// Dispose the stream. + /// + /// + /// This may or may not result in a Close() call on the captive stream. + /// See the doc on constructors that take a leaveOpen parameter for more information. 
+ /// + protected override void Dispose(bool disposing) + { + try + { + if (!_disposed) + { + if (disposing && (this._baseStream != null)) + { + this._baseStream.Close(); + this._Crc32 = _baseStream.Crc32; + } + _disposed = true; + } + } + finally + { + base.Dispose(disposing); + } + } + + + /// + /// Indicates whether the stream can be read. + /// + /// + /// The return value depends on whether the captive stream supports reading. + /// + public override bool CanRead + { + get + { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + return _baseStream._stream.CanRead; + } + } + + /// + /// Indicates whether the stream supports Seek operations. + /// + /// + /// Always returns false. + /// + public override bool CanSeek + { + get { return false; } + } + + + /// + /// Indicates whether the stream can be written. + /// + /// + /// The return value depends on whether the captive stream supports writing. + /// + public override bool CanWrite + { + get + { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + return _baseStream._stream.CanWrite; + } + } + + /// + /// Flush the stream. + /// + public override void Flush() + { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + _baseStream.Flush(); + } + + /// + /// Reading this property always throws a . + /// + public override long Length + { + get { throw new NotImplementedException(); } + } + + /// + /// The position of the stream pointer. + /// + /// + /// + /// Setting this property always throws a . Reading will return the total bytes + /// written out, if used in writing, or the total bytes read in, if used in + /// reading. The count may refer to compressed bytes or uncompressed bytes, + /// depending on how you've used the stream. + /// + public override long Position + { + get + { + if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Writer) + return this._baseStream._z.TotalBytesOut + _headerByteCount; + if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Reader) + return this._baseStream._z.TotalBytesIn + this._baseStream._gzipHeaderByteCount; + return 0; + } + + set { throw new NotImplementedException(); } + } + + /// + /// Read and decompress data from the source stream. + /// + /// + /// + /// With a GZipStream, decompression is done through reading. + /// + /// + /// + /// + /// byte[] working = new byte[WORKING_BUFFER_SIZE]; + /// using (System.IO.Stream input = System.IO.File.OpenRead(_CompressedFile)) + /// { + /// using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true)) + /// { + /// using (var output = System.IO.File.Create(_DecompressedFile)) + /// { + /// int n; + /// while ((n= decompressor.Read(working, 0, working.Length)) !=0) + /// { + /// output.Write(working, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// The buffer into which the decompressed data should be placed. + /// the offset within that data array to put the first byte read. + /// the number of bytes to read. 
+ /// the number of bytes actually read + public override int Read(byte[] buffer, int offset, int count) + { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + int n = _baseStream.Read(buffer, offset, count); + + // Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n); + // Console.WriteLine( Util.FormatByteArray(buffer, offset, n) ); + + if (!_firstReadDone) + { + _firstReadDone = true; + FileName = _baseStream._GzipFileName; + Comment = _baseStream._GzipComment; + } + return n; + } + + + + /// + /// Calling this method always throws a . + /// + /// irrelevant; it will always throw! + /// irrelevant; it will always throw! + /// irrelevant! + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotImplementedException(); + } + + /// + /// Calling this method always throws a . + /// + /// irrelevant; this method will always throw! + public override void SetLength(long value) + { + throw new NotImplementedException(); + } + + /// + /// Write data to the stream. + /// + /// + /// + /// + /// If you wish to use the GZipStream to compress data while writing, + /// you can create a GZipStream with CompressionMode.Compress, and a + /// writable output stream. Then call Write() on that GZipStream, + /// providing uncompressed data as input. The data sent to the output stream + /// will be the compressed form of the data written. + /// + /// + /// + /// A GZipStream can be used for Read() or Write(), but not + /// both. Writing implies compression. Reading implies decompression. + /// + /// + /// + /// The buffer holding data to write to the stream. + /// the offset within that data array to find the first byte to write. + /// the number of bytes to write. + public override void Write(byte[] buffer, int offset, int count) + { + if (_disposed) throw new ObjectDisposedException("GZipStream"); + if (_baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Undefined) + { + //Console.WriteLine("GZipStream: First write"); + if (_baseStream._wantCompress) + { + // first write in compression, therefore, emit the GZIP header + _headerByteCount = EmitHeader(); + } + else + { + throw new InvalidOperationException(); + } + } + + _baseStream.Write(buffer, offset, count); + } + #endregion + + + internal static readonly System.DateTime _unixEpoch = new System.DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc); + internal static readonly System.Text.Encoding iso8859dash1 = System.Text.Encoding.GetEncoding("iso-8859-1"); + + + private int EmitHeader() + { + byte[] commentBytes = (Comment == null) ? null : iso8859dash1.GetBytes(Comment); + byte[] filenameBytes = (FileName == null) ? null : iso8859dash1.GetBytes(FileName); + + int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1; + int fnLength = (FileName == null) ? 
0 : filenameBytes.Length + 1; + + int bufferLength = 10 + cbLength + fnLength; + byte[] header = new byte[bufferLength]; + int i = 0; + // ID + header[i++] = 0x1F; + header[i++] = 0x8B; + + // compression method + header[i++] = 8; + byte flag = 0; + if (Comment != null) + flag ^= 0x10; + if (FileName != null) + flag ^= 0x8; + + // flag + header[i++] = flag; + + // mtime + if (!LastModified.HasValue) LastModified = DateTime.Now; + System.TimeSpan delta = LastModified.Value - _unixEpoch; + Int32 timet = (Int32)delta.TotalSeconds; + Array.Copy(BitConverter.GetBytes(timet), 0, header, i, 4); + i += 4; + + // xflg + header[i++] = 0; // this field is totally useless + // OS + header[i++] = 0xFF; // 0xFF == unspecified + + // extra field length - only if FEXTRA is set, which it is not. + //header[i++]= 0; + //header[i++]= 0; + + // filename + if (fnLength != 0) + { + Array.Copy(filenameBytes, 0, header, i, fnLength - 1); + i += fnLength - 1; + header[i++] = 0; // terminate + } + + // comment + if (cbLength != 0) + { + Array.Copy(commentBytes, 0, header, i, cbLength - 1); + i += cbLength - 1; + header[i++] = 0; // terminate + } + + _baseStream._stream.Write(header, 0, header.Length); + + return header.Length; // bytes written + } + + + + /// + /// Compress a string into a byte array using GZip. + /// + /// + /// + /// Uncompress it with . + /// + /// + /// + /// + /// + /// + /// A string to compress. The string will first be encoded + /// using UTF8, then compressed. + /// + /// + /// The string in compressed form + public static byte[] CompressString(String s) + { + using (var ms = new MemoryStream()) + { + System.IO.Stream compressor = + new GZipStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression); + ZlibBaseStream.CompressString(s, compressor); + return ms.ToArray(); + } + } + + + /// + /// Compress a byte array into a new byte array using GZip. + /// + /// + /// + /// Uncompress it with . + /// + /// + /// + /// + /// + /// + /// A buffer to compress. + /// + /// + /// The data in compressed form + public static byte[] CompressBuffer(byte[] b) + { + using (var ms = new MemoryStream()) + { + System.IO.Stream compressor = + new GZipStream( ms, CompressionMode.Compress, CompressionLevel.BestCompression ); + + ZlibBaseStream.CompressBuffer(b, compressor); + return ms.ToArray(); + } + } + + + /// + /// Uncompress a GZip'ed byte array into a single string. + /// + /// + /// + /// + /// + /// + /// A buffer containing GZIP-compressed data. + /// + /// + /// The uncompressed string + public static String UncompressString(byte[] compressed) + { + using (var input = new MemoryStream(compressed)) + { + Stream decompressor = new GZipStream(input, CompressionMode.Decompress); + return ZlibBaseStream.UncompressString(compressed, decompressor); + } + } + + + /// + /// Uncompress a GZip'ed byte array into a byte array. + /// + /// + /// + /// + /// + /// + /// A buffer containing data that has been compressed with GZip. 
+ /// + /// + /// The data in uncompressed form + public static byte[] UncompressBuffer(byte[] compressed) + { + using (var input = new System.IO.MemoryStream(compressed)) + { + System.IO.Stream decompressor = + new GZipStream( input, CompressionMode.Decompress ); + + return ZlibBaseStream.UncompressBuffer(compressed, decompressor); + } + } + + + } +} diff --git a/NBToolkit/NBToolkit/Zlib/InfTree.cs b/NBToolkit/NBToolkit/Zlib/InfTree.cs new file mode 100644 index 0000000..ffc5bb6 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/InfTree.cs @@ -0,0 +1,436 @@ +// Inftree.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-October-28 12:43:54> +// +// ------------------------------------------------------------------ +// +// This module defines classes used in decompression. This code is derived +// from the jzlib implementation of zlib. In keeping with the license for jzlib, +// the copyright to that code is below. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. 
+// +// ----------------------------------------------------------------------- + + + +using System; +namespace Ionic.Zlib +{ + + sealed class InfTree + { + + private const int MANY = 1440; + + private const int Z_OK = 0; + private const int Z_STREAM_END = 1; + private const int Z_NEED_DICT = 2; + private const int Z_ERRNO = - 1; + private const int Z_STREAM_ERROR = - 2; + private const int Z_DATA_ERROR = - 3; + private const int Z_MEM_ERROR = - 4; + private const int Z_BUF_ERROR = - 5; + private const int Z_VERSION_ERROR = - 6; + + internal const int fixed_bl = 9; + internal const int fixed_bd = 5; + + //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_tl'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] fixed_tl = new int[]{96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 192, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 160, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 224, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 144, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 208, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 176, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 240, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 200, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 168, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 232, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 152, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 216, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 184, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 248, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 196, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 164, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 228, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 148, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 212, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 180, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 244, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 204, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 172, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 236, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 156, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 220, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 188, 0, 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 252, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 194, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 162, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 226, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 146, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 210, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 178, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 242, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 202, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 170, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 234, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 154, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 218, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 186, + 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 250, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 198, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 166, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 230, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 150, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 214, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 182, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 246, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 206, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 174, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 238, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 158, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 222, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 190, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 254, 96, 7, 
256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 193, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 161, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 225, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 145, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 209, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 177, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 241, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 201, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 169, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 233, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 153, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 217, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 185, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 249, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 197, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 165, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 229, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 149, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 213, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 181, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 245, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 205, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 173, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 237, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 157, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 221, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 189, 0, 8, + 14, 0, 8, 142, 0, 8, 78, 0, 9, 253, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 195, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 163, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 227, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 147, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 211, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 179, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 243, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 203, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 171, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 235, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 155, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 219, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 187, 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 251, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 199, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 167, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 231, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 151, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 215, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 183, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 247, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 207, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 175, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 239, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 159, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 223, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 191, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 255}; + //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_td'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] fixed_td = new int[]{80, 5, 1, 87, 5, 257, 83, 5, 17, 91, 5, 4097, 81, 5, 5, 89, 5, 1025, 85, 5, 65, 93, 5, 16385, 80, 5, 3, 88, 5, 513, 84, 5, 33, 92, 5, 8193, 82, 5, 9, 90, 5, 2049, 86, 5, 129, 192, 5, 24577, 80, 5, 2, 87, 5, 385, 83, 5, 25, 91, 5, 6145, 81, 5, 7, 89, 5, 1537, 85, 5, 97, 93, 5, 24577, 80, 5, 4, 88, 5, 769, 84, 5, 49, 92, 5, 12289, 82, 5, 13, 90, 5, 3073, 86, 5, 193, 192, 5, 24577}; + + // Tables for deflate from PKZIP's appnote.txt. + //UPGRADE_NOTE: Final was removed from the declaration of 'cplens'. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] cplens = new int[]{3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + + // see note #13 above about 258 + //UPGRADE_NOTE: Final was removed from the declaration of 'cplext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] cplext = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; + + //UPGRADE_NOTE: Final was removed from the declaration of 'cpdist'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] cpdist = new int[]{1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577}; + + //UPGRADE_NOTE: Final was removed from the declaration of 'cpdext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] cpdext = new int[]{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; + + // If BMAX needs to be larger than 16, then h and x[] should be uLong. + internal const int BMAX = 15; // maximum bit length of any code + + internal int[] hn = null; // hufts used in space + internal int[] v = null; // work area for huft_build + internal int[] c = null; // bit length count table + internal int[] r = null; // table entry for structure assignment + internal int[] u = null; // table stack + internal int[] x = null; // bit offsets, then code stack + + private int huft_build(int[] b, int bindex, int n, int s, int[] d, int[] e, int[] t, int[] m, int[] hp, int[] hn, int[] v) + { + // Given a list of code lengths and a maximum table size, make a set of + // tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR + // if the given code set is incomplete (the tables are still built in this + // case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of + // lengths), or Z_MEM_ERROR if not enough memory. 
+ + int a; // counter for codes of length k + int f; // i repeats in table every f entries + int g; // maximum code length + int h; // table level + int i; // counter, current code + int j; // counter + int k; // number of bits in current code + int l; // bits per table (returned in m) + int mask; // (1 << w) - 1, to avoid cc -O bug on HP + int p; // pointer into c[], b[], or v[] + int q; // points to current table + int w; // bits before this table == (l * h) + int xp; // pointer into x + int y; // number of dummy codes added + int z; // number of entries in current table + + // Generate counts for each bit length + + p = 0; i = n; + do + { + c[b[bindex + p]]++; p++; i--; // assume all entries <= BMAX + } + while (i != 0); + + if (c[0] == n) + { + // null input--all zero length codes + t[0] = - 1; + m[0] = 0; + return Z_OK; + } + + // Find minimum and maximum length, bound *m by those + l = m[0]; + for (j = 1; j <= BMAX; j++) + if (c[j] != 0) + break; + k = j; // minimum code length + if (l < j) + { + l = j; + } + for (i = BMAX; i != 0; i--) + { + if (c[i] != 0) + break; + } + g = i; // maximum code length + if (l > i) + { + l = i; + } + m[0] = l; + + // Adjust last length count to fill out codes, if needed + for (y = 1 << j; j < i; j++, y <<= 1) + { + if ((y -= c[j]) < 0) + { + return Z_DATA_ERROR; + } + } + if ((y -= c[i]) < 0) + { + return Z_DATA_ERROR; + } + c[i] += y; + + // Generate starting offsets into the value table for each length + x[1] = j = 0; + p = 1; xp = 2; + while (--i != 0) + { + // note that i == g from above + x[xp] = (j += c[p]); + xp++; + p++; + } + + // Make a table of values in order of bit lengths + i = 0; p = 0; + do + { + if ((j = b[bindex + p]) != 0) + { + v[x[j]++] = i; + } + p++; + } + while (++i < n); + n = x[g]; // set n to length of v + + // Generate the Huffman codes and for each, make the table entries + x[0] = i = 0; // first Huffman code is zero + p = 0; // grab values in bit order + h = - 1; // no tables yet--level -1 + w = - l; // bits decoded == (l * h) + u[0] = 0; // just to keep compilers happy + q = 0; // ditto + z = 0; // ditto + + // go through the bit lengths (k already is bits in shortest code) + for (; k <= g; k++) + { + a = c[k]; + while (a-- != 0) + { + // here i is the Huffman code of length k bits for value *p + // make tables up to required level + while (k > w + l) + { + h++; + w += l; // previous table always l bits + // compute minimum size table less than or equal to l bits + z = g - w; + z = (z > l)?l:z; // table size upper limit + if ((f = 1 << (j = k - w)) > a + 1) + { + // try a k-w bit table + // too few codes for k-w bit table + f -= (a + 1); // deduct codes from patterns left + xp = k; + if (j < z) + { + while (++j < z) + { + // try smaller tables up to z bits + if ((f <<= 1) <= c[++xp]) + break; // enough codes to use up j bits + f -= c[xp]; // else deduct codes from patterns + } + } + } + z = 1 << j; // table entries for j-bit table + + // allocate new table + if (hn[0] + z > MANY) + { + // (note: doesn't matter for fixed) + return Z_DATA_ERROR; // overflow of MANY + } + u[h] = q = hn[0]; // DEBUG + hn[0] += z; + + // connect to last table, if there is one + if (h != 0) + { + x[h] = i; // save pattern for backing up + r[0] = (sbyte) j; // bits in this table + r[1] = (sbyte) l; // bits to dump before this table + j = SharedUtils.URShift(i, (w - l)); + r[2] = (int) (q - u[h - 1] - j); // offset to this table + Array.Copy(r, 0, hp, (u[h - 1] + j) * 3, 3); // connect to last table + } + else + { + t[0] = q; // first table is 
returned result + } + } + + // set up table entry in r + r[1] = (sbyte) (k - w); + if (p >= n) + { + r[0] = 128 + 64; // out of values--invalid code + } + else if (v[p] < s) + { + r[0] = (sbyte) (v[p] < 256?0:32 + 64); // 256 is end-of-block + r[2] = v[p++]; // simple code is just the value + } + else + { + r[0] = (sbyte) (e[v[p] - s] + 16 + 64); // non-simple--look up in lists + r[2] = d[v[p++] - s]; + } + + // fill code-like entries with r + f = 1 << (k - w); + for (j = SharedUtils.URShift(i, w); j < z; j += f) + { + Array.Copy(r, 0, hp, (q + j) * 3, 3); + } + + // backwards increment the k-bit code i + for (j = 1 << (k - 1); (i & j) != 0; j = SharedUtils.URShift(j, 1)) + { + i ^= j; + } + i ^= j; + + // backup over finished tables + mask = (1 << w) - 1; // needed on HP, cc -O bug + while ((i & mask) != x[h]) + { + h--; // don't need to update q + w -= l; + mask = (1 << w) - 1; + } + } + } + // Return Z_BUF_ERROR if we were given an incomplete table + return y != 0 && g != 1?Z_BUF_ERROR:Z_OK; + } + + internal int inflate_trees_bits(int[] c, int[] bb, int[] tb, int[] hp, ZlibCodec z) + { + int result; + initWorkArea(19); + hn[0] = 0; + result = huft_build(c, 0, 19, 19, null, null, tb, bb, hp, hn, v); + + if (result == Z_DATA_ERROR) + { + z.Message = "oversubscribed dynamic bit lengths tree"; + } + else if (result == Z_BUF_ERROR || bb[0] == 0) + { + z.Message = "incomplete dynamic bit lengths tree"; + result = Z_DATA_ERROR; + } + return result; + } + + internal int inflate_trees_dynamic(int nl, int nd, int[] c, int[] bl, int[] bd, int[] tl, int[] td, int[] hp, ZlibCodec z) + { + int result; + + // build literal/length tree + initWorkArea(288); + hn[0] = 0; + result = huft_build(c, 0, nl, 257, cplens, cplext, tl, bl, hp, hn, v); + if (result != Z_OK || bl[0] == 0) + { + if (result == Z_DATA_ERROR) + { + z.Message = "oversubscribed literal/length tree"; + } + else if (result != Z_MEM_ERROR) + { + z.Message = "incomplete literal/length tree"; + result = Z_DATA_ERROR; + } + return result; + } + + // build distance tree + initWorkArea(288); + result = huft_build(c, nl, nd, 0, cpdist, cpdext, td, bd, hp, hn, v); + + if (result != Z_OK || (bd[0] == 0 && nl > 257)) + { + if (result == Z_DATA_ERROR) + { + z.Message = "oversubscribed distance tree"; + } + else if (result == Z_BUF_ERROR) + { + z.Message = "incomplete distance tree"; + result = Z_DATA_ERROR; + } + else if (result != Z_MEM_ERROR) + { + z.Message = "empty distance tree with lengths"; + result = Z_DATA_ERROR; + } + return result; + } + + return Z_OK; + } + + internal static int inflate_trees_fixed(int[] bl, int[] bd, int[][] tl, int[][] td, ZlibCodec z) + { + bl[0] = fixed_bl; + bd[0] = fixed_bd; + tl[0] = fixed_tl; + td[0] = fixed_td; + return Z_OK; + } + + private void initWorkArea(int vsize) + { + if (hn == null) + { + hn = new int[1]; + v = new int[vsize]; + c = new int[BMAX + 1]; + r = new int[3]; + u = new int[BMAX]; + x = new int[BMAX + 1]; + } + else + { + if (v.Length < vsize) + { + v = new int[vsize]; + } + Array.Clear(v,0,vsize); + Array.Clear(c,0,BMAX+1); + r[0]=0; r[1]=0; r[2]=0; + // for(int i=0; i +// +// ------------------------------------------------------------------ +// +// This module defines classes for decompression. This code is derived +// from the jzlib implementation of zlib, but significantly modified. +// The object model is not the same, and many of the behaviors are +// different. Nonetheless, in keeping with the license for jzlib, I am +// reproducing the copyright to that code here. 
+// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + + +using System; +namespace Ionic.Zlib +{ + sealed class InflateBlocks + { + private const int MANY = 1440; + + // Table for deflate from PKZIP's appnote.txt. 
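+        // 'border' gives the order in which the code-length code lengths are stored
+        // in the header of a dynamic block (RFC 1951, section 3.2.7).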
+ internal static readonly int[] border = new int[] + { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; + + private enum InflateBlockMode + { + TYPE = 0, // get type bits (3, including end bit) + LENS = 1, // get lengths for stored + STORED = 2, // processing stored block + TABLE = 3, // get table lengths + BTREE = 4, // get bit lengths tree for a dynamic block + DTREE = 5, // get length, distance trees for a dynamic block + CODES = 6, // processing fixed or dynamic block + DRY = 7, // output remaining window bytes + DONE = 8, // finished last block, done + BAD = 9, // ot a data error--stuck here + } + + private InflateBlockMode mode; // current inflate_block mode + + internal int left; // if STORED, bytes left to copy + + internal int table; // table lengths (14 bits) + internal int index; // index into blens (or border) + internal int[] blens; // bit lengths of codes + internal int[] bb = new int[1]; // bit length tree depth + internal int[] tb = new int[1]; // bit length decoding tree + + internal InflateCodes codes = new InflateCodes(); // if CODES, current state + + internal int last; // true if this block is the last block + + internal ZlibCodec _codec; // pointer back to this zlib stream + + // mode independent information + internal int bitk; // bits in bit buffer + internal int bitb; // bit buffer + internal int[] hufts; // single malloc for tree space + internal byte[] window; // sliding window + internal int end; // one byte after sliding window + internal int readAt; // window read pointer + internal int writeAt; // window write pointer + internal System.Object checkfn; // check function + internal uint check; // check on output + + internal InfTree inftree = new InfTree(); + + internal InflateBlocks(ZlibCodec codec, System.Object checkfn, int w) + { + _codec = codec; + hufts = new int[MANY * 3]; + window = new byte[w]; + end = w; + this.checkfn = checkfn; + mode = InflateBlockMode.TYPE; + Reset(); + } + + internal uint Reset() + { + uint oldCheck = check; + mode = InflateBlockMode.TYPE; + bitk = 0; + bitb = 0; + readAt = writeAt = 0; + + if (checkfn != null) + _codec._Adler32 = check = Adler.Adler32(0, null, 0, 0); + return oldCheck; + } + + + internal int Process(int r) + { + int t; // temporary storage + int b; // bit buffer + int k; // bits in bit buffer + int p; // input data pointer + int n; // bytes available there + int q; // output window write pointer + int m; // bytes to end of window or read pointer + + // copy input/output information to locals (UPDATE macro restores) + + p = _codec.NextIn; + n = _codec.AvailableBytesIn; + b = bitb; + k = bitk; + + q = writeAt; + m = (int)(q < readAt ? 
readAt - q - 1 : end - q); + + + // process input based on current state + while (true) + { + switch (mode) + { + case InflateBlockMode.TYPE: + + while (k < (3)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + t = (int)(b & 7); + last = t & 1; + + switch ((uint)t >> 1) + { + case 0: // stored + b >>= 3; k -= (3); + t = k & 7; // go to byte boundary + b >>= t; k -= t; + mode = InflateBlockMode.LENS; // get length of stored block + break; + + case 1: // fixed + int[] bl = new int[1]; + int[] bd = new int[1]; + int[][] tl = new int[1][]; + int[][] td = new int[1][]; + InfTree.inflate_trees_fixed(bl, bd, tl, td, _codec); + codes.Init(bl[0], bd[0], tl[0], 0, td[0], 0); + b >>= 3; k -= 3; + mode = InflateBlockMode.CODES; + break; + + case 2: // dynamic + b >>= 3; k -= 3; + mode = InflateBlockMode.TABLE; + break; + + case 3: // illegal + b >>= 3; k -= 3; + mode = InflateBlockMode.BAD; + _codec.Message = "invalid block type"; + r = ZlibConstants.Z_DATA_ERROR; + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + break; + + case InflateBlockMode.LENS: + + while (k < (32)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + ; + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + if ( ( ((~b)>>16) & 0xffff) != (b & 0xffff)) + { + mode = InflateBlockMode.BAD; + _codec.Message = "invalid stored block lengths"; + r = ZlibConstants.Z_DATA_ERROR; + + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + left = (b & 0xffff); + b = k = 0; // dump bits + mode = left != 0 ? InflateBlockMode.STORED : (last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE); + break; + + case InflateBlockMode.STORED: + if (n == 0) + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + if (m == 0) + { + if (q == end && readAt != 0) + { + q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q); + } + if (m == 0) + { + writeAt = q; + r = Flush(r); + q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q); + if (q == end && readAt != 0) + { + q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q); + } + if (m == 0) + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + } + } + r = ZlibConstants.Z_OK; + + t = left; + if (t > n) + t = n; + if (t > m) + t = m; + Array.Copy(_codec.InputBuffer, p, window, q, t); + p += t; n -= t; + q += t; m -= t; + if ((left -= t) != 0) + break; + mode = last != 0 ? 
InflateBlockMode.DRY : InflateBlockMode.TYPE; + break; + + case InflateBlockMode.TABLE: + + while (k < (14)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + table = t = (b & 0x3fff); + if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) + { + mode = InflateBlockMode.BAD; + _codec.Message = "too many length or distance symbols"; + r = ZlibConstants.Z_DATA_ERROR; + + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f); + if (blens == null || blens.Length < t) + { + blens = new int[t]; + } + else + { + Array.Clear(blens, 0, t); + // for (int i = 0; i < t; i++) + // { + // blens[i] = 0; + // } + } + + b >>= 14; + k -= 14; + + + index = 0; + mode = InflateBlockMode.BTREE; + goto case InflateBlockMode.BTREE; + + case InflateBlockMode.BTREE: + while (index < 4 + (table >> 10)) + { + while (k < (3)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + blens[border[index++]] = b & 7; + + b >>= 3; k -= 3; + } + + while (index < 19) + { + blens[border[index++]] = 0; + } + + bb[0] = 7; + t = inftree.inflate_trees_bits(blens, bb, tb, hufts, _codec); + if (t != ZlibConstants.Z_OK) + { + r = t; + if (r == ZlibConstants.Z_DATA_ERROR) + { + blens = null; + mode = InflateBlockMode.BAD; + } + + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + index = 0; + mode = InflateBlockMode.DTREE; + goto case InflateBlockMode.DTREE; + + case InflateBlockMode.DTREE: + while (true) + { + t = table; + if (!(index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))) + { + break; + } + + int i, j, c; + + t = bb[0]; + + while (k < t) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + t = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 1]; + c = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 2]; + + if (c < 16) + { + b >>= t; k -= t; + blens[index++] = c; + } + else + { + // c == 16..18 + i = c == 18 ? 7 : c - 14; + j = c == 18 ? 
11 : 3; + + while (k < (t + i)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + b >>= t; k -= t; + + j += (b & InternalInflateConstants.InflateMask[i]); + + b >>= i; k -= i; + + i = index; + t = table; + if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1)) + { + blens = null; + mode = InflateBlockMode.BAD; + _codec.Message = "invalid bit length repeat"; + r = ZlibConstants.Z_DATA_ERROR; + + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + c = (c == 16) ? blens[i-1] : 0; + do + { + blens[i++] = c; + } + while (--j != 0); + index = i; + } + } + + tb[0] = -1; + { + int[] bl = new int[] { 9 }; // must be <= 9 for lookahead assumptions + int[] bd = new int[] { 6 }; // must be <= 9 for lookahead assumptions + int[] tl = new int[1]; + int[] td = new int[1]; + + t = table; + t = inftree.inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), blens, bl, bd, tl, td, hufts, _codec); + + if (t != ZlibConstants.Z_OK) + { + if (t == ZlibConstants.Z_DATA_ERROR) + { + blens = null; + mode = InflateBlockMode.BAD; + } + r = t; + + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + codes.Init(bl[0], bd[0], hufts, tl[0], hufts, td[0]); + } + mode = InflateBlockMode.CODES; + goto case InflateBlockMode.CODES; + + case InflateBlockMode.CODES: + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + + r = codes.Process(this, r); + if (r != ZlibConstants.Z_STREAM_END) + { + return Flush(r); + } + + r = ZlibConstants.Z_OK; + p = _codec.NextIn; + n = _codec.AvailableBytesIn; + b = bitb; + k = bitk; + q = writeAt; + m = (int)(q < readAt ? readAt - q - 1 : end - q); + + if (last == 0) + { + mode = InflateBlockMode.TYPE; + break; + } + mode = InflateBlockMode.DRY; + goto case InflateBlockMode.DRY; + + case InflateBlockMode.DRY: + writeAt = q; + r = Flush(r); + q = writeAt; m = (int)(q < readAt ? 
readAt - q - 1 : end - q); + if (readAt != writeAt) + { + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + mode = InflateBlockMode.DONE; + goto case InflateBlockMode.DONE; + + case InflateBlockMode.DONE: + r = ZlibConstants.Z_STREAM_END; + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + + case InflateBlockMode.BAD: + r = ZlibConstants.Z_DATA_ERROR; + + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + + + default: + r = ZlibConstants.Z_STREAM_ERROR; + + bitb = b; bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + } + } + + + internal void Free() + { + Reset(); + window = null; + hufts = null; + } + + internal void SetDictionary(byte[] d, int start, int n) + { + Array.Copy(d, start, window, 0, n); + readAt = writeAt = n; + } + + // Returns true if inflate is currently at the end of a block generated + // by Z_SYNC_FLUSH or Z_FULL_FLUSH. + internal int SyncPoint() + { + return mode == InflateBlockMode.LENS ? 1 : 0; + } + + // copy as much as possible from the sliding window to the output area + internal int Flush(int r) + { + int nBytes; + + for (int pass=0; pass < 2; pass++) + { + if (pass==0) + { + // compute number of bytes to copy as far as end of window + nBytes = (int)((readAt <= writeAt ? writeAt : end) - readAt); + } + else + { + // compute bytes to copy + nBytes = writeAt - readAt; + } + + // workitem 8870 + if (nBytes == 0) + { + if (r == ZlibConstants.Z_BUF_ERROR) + r = ZlibConstants.Z_OK; + return r; + } + + if (nBytes > _codec.AvailableBytesOut) + nBytes = _codec.AvailableBytesOut; + + if (nBytes != 0 && r == ZlibConstants.Z_BUF_ERROR) + r = ZlibConstants.Z_OK; + + // update counters + _codec.AvailableBytesOut -= nBytes; + _codec.TotalBytesOut += nBytes; + + // update check information + if (checkfn != null) + _codec._Adler32 = check = Adler.Adler32(check, window, readAt, nBytes); + + // copy as far as end of window + Array.Copy(window, readAt, _codec.OutputBuffer, _codec.NextOut, nBytes); + _codec.NextOut += nBytes; + readAt += nBytes; + + // see if more to copy at beginning of window + if (readAt == end && pass == 0) + { + // wrap pointers + readAt = 0; + if (writeAt == end) + writeAt = 0; + } + else pass++; + } + + // done + return r; + } + } + + + internal static class InternalInflateConstants + { + // And'ing with mask[n] masks the lower n bits + internal static readonly int[] InflateMask = new int[] { + 0x00000000, 0x00000001, 0x00000003, 0x00000007, + 0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f, + 0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff, + 0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff }; + } + + + sealed class InflateCodes + { + // waiting for "i:"=input, + // "o:"=output, + // "x:"=nothing + private const int START = 0; // x: set up for LEN + private const int LEN = 1; // i: get length/literal/eob next + private const int LENEXT = 2; // i: getting length extra (have base) + private const int DIST = 3; // i: get distance next + private const int DISTEXT = 4; // i: getting distance extra + private const int COPY = 5; // o: copying bytes in window, waiting for space + private const int LIT = 6; // o: got literal, waiting for output space + private 
const int WASH = 7; // o: got eob, possibly still output waiting + private const int END = 8; // x: got eob and all data flushed + private const int BADCODE = 9; // x: got error + + internal int mode; // current inflate_codes mode + + // mode dependent information + internal int len; + + internal int[] tree; // pointer into tree + internal int tree_index = 0; + internal int need; // bits needed + + internal int lit; + + // if EXT or COPY, where and how much + internal int bitsToGet; // bits to get for extra + internal int dist; // distance back to copy from + + internal byte lbits; // ltree bits decoded per branch + internal byte dbits; // dtree bits decoder per branch + internal int[] ltree; // literal/length/eob tree + internal int ltree_index; // literal/length/eob tree + internal int[] dtree; // distance tree + internal int dtree_index; // distance tree + + internal InflateCodes() + { + } + + internal void Init(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index) + { + mode = START; + lbits = (byte)bl; + dbits = (byte)bd; + ltree = tl; + ltree_index = tl_index; + dtree = td; + dtree_index = td_index; + tree = null; + } + + internal int Process(InflateBlocks blocks, int r) + { + int j; // temporary storage + int tindex; // temporary pointer + int e; // extra bits or operation + int b = 0; // bit buffer + int k = 0; // bits in bit buffer + int p = 0; // input data pointer + int n; // bytes available there + int q; // output window write pointer + int m; // bytes to end of window or read pointer + int f; // pointer to copy strings from + + ZlibCodec z = blocks._codec; + + // copy input/output information to locals (UPDATE macro restores) + p = z.NextIn; + n = z.AvailableBytesIn; + b = blocks.bitb; + k = blocks.bitk; + q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + // process input and output based on current state + while (true) + { + switch (mode) + { + // waiting for "i:"=input, "o:"=output, "x:"=nothing + case START: // x: set up for LEN + if (m >= 258 && n >= 10) + { + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + r = InflateFast(lbits, dbits, ltree, ltree_index, dtree, dtree_index, blocks, z); + + p = z.NextIn; + n = z.AvailableBytesIn; + b = blocks.bitb; + k = blocks.bitk; + q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + if (r != ZlibConstants.Z_OK) + { + mode = (r == ZlibConstants.Z_STREAM_END) ? 
WASH : BADCODE; + break; + } + } + need = lbits; + tree = ltree; + tree_index = ltree_index; + + mode = LEN; + goto case LEN; + + case LEN: // i: get length/literal/eob next + j = need; + + while (k < j) + { + if (n != 0) + r = ZlibConstants.Z_OK; + else + { + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + n--; + b |= (z.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3; + + b >>= (tree[tindex + 1]); + k -= (tree[tindex + 1]); + + e = tree[tindex]; + + if (e == 0) + { + // literal + lit = tree[tindex + 2]; + mode = LIT; + break; + } + if ((e & 16) != 0) + { + // length + bitsToGet = e & 15; + len = tree[tindex + 2]; + mode = LENEXT; + break; + } + if ((e & 64) == 0) + { + // next table + need = e; + tree_index = tindex / 3 + tree[tindex + 2]; + break; + } + if ((e & 32) != 0) + { + // end of block + mode = WASH; + break; + } + mode = BADCODE; // invalid code + z.Message = "invalid literal/length code"; + r = ZlibConstants.Z_DATA_ERROR; + + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + + + case LENEXT: // i: getting length extra (have base) + j = bitsToGet; + + while (k < j) + { + if (n != 0) + r = ZlibConstants.Z_OK; + else + { + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + n--; b |= (z.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + len += (b & InternalInflateConstants.InflateMask[j]); + + b >>= j; + k -= j; + + need = dbits; + tree = dtree; + tree_index = dtree_index; + mode = DIST; + goto case DIST; + + case DIST: // i: get distance next + j = need; + + while (k < j) + { + if (n != 0) + r = ZlibConstants.Z_OK; + else + { + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + n--; b |= (z.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3; + + b >>= tree[tindex + 1]; + k -= tree[tindex + 1]; + + e = (tree[tindex]); + if ((e & 0x10) != 0) + { + // distance + bitsToGet = e & 15; + dist = tree[tindex + 2]; + mode = DISTEXT; + break; + } + if ((e & 64) == 0) + { + // next table + need = e; + tree_index = tindex / 3 + tree[tindex + 2]; + break; + } + mode = BADCODE; // invalid code + z.Message = "invalid distance code"; + r = ZlibConstants.Z_DATA_ERROR; + + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + + + case DISTEXT: // i: getting distance extra + j = bitsToGet; + + while (k < j) + { + if (n != 0) + r = ZlibConstants.Z_OK; + else + { + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + n--; b |= (z.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + dist += (b & InternalInflateConstants.InflateMask[j]); + + b >>= j; + k -= j; + + mode = COPY; + goto case COPY; + + case COPY: // o: copying bytes in window, waiting for space + f = q - dist; + while (f < 0) + { + // modulo window size-"while" instead + f += blocks.end; // of "if" handles invalid distances + } + while (len != 0) + { + if (m == 
0) + { + if (q == blocks.end && blocks.readAt != 0) + { + q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + } + if (m == 0) + { + blocks.writeAt = q; r = blocks.Flush(r); + q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + if (q == blocks.end && blocks.readAt != 0) + { + q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + } + + if (m == 0) + { + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + } + } + + blocks.window[q++] = blocks.window[f++]; m--; + + if (f == blocks.end) + f = 0; + len--; + } + mode = START; + break; + + case LIT: // o: got literal, waiting for output space + if (m == 0) + { + if (q == blocks.end && blocks.readAt != 0) + { + q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + } + if (m == 0) + { + blocks.writeAt = q; r = blocks.Flush(r); + q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + if (q == blocks.end && blocks.readAt != 0) + { + q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + } + if (m == 0) + { + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + } + } + r = ZlibConstants.Z_OK; + + blocks.window[q++] = (byte)lit; m--; + + mode = START; + break; + + case WASH: // o: got eob, possibly more output + if (k > 7) + { + // return unused byte, if any + k -= 8; + n++; + p--; // can always return one + } + + blocks.writeAt = q; r = blocks.Flush(r); + q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + if (blocks.readAt != blocks.writeAt) + { + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + mode = END; + goto case END; + + case END: + r = ZlibConstants.Z_STREAM_END; + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + + case BADCODE: // x: got error + + r = ZlibConstants.Z_DATA_ERROR; + + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + + default: + r = ZlibConstants.Z_STREAM_ERROR; + + blocks.bitb = b; blocks.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + } + } + + + // Called with number of bytes left to write in window at least 258 + // (the maximum string length) and number of input bytes available + // at least ten. The ten bytes are six bytes for the longest length/ + // distance pair plus four bytes for overloading the bit buffer. 
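        // In numbers: 258 is the maximum DEFLATE match length (3..258). The six
        // bytes cover the worst-case length/distance pair: a length code of up to
        // 15 bits plus up to 5 extra bits, and a distance code of up to 15 bits
        // plus up to 13 extra bits, i.e. 15 + 5 + 15 + 13 = 48 bits = 6 bytes.
        // The remaining four bytes allow the 32-bit bit buffer to be topped up
        // ahead of need inside the fast loop.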
+ + internal int InflateFast(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index, InflateBlocks s, ZlibCodec z) + { + int t; // temporary pointer + int[] tp; // temporary pointer + int tp_index; // temporary pointer + int e; // extra bits or operation + int b; // bit buffer + int k; // bits in bit buffer + int p; // input data pointer + int n; // bytes available there + int q; // output window write pointer + int m; // bytes to end of window or read pointer + int ml; // mask for literal/length tree + int md; // mask for distance tree + int c; // bytes to copy + int d; // distance back to copy from + int r; // copy source pointer + + int tp_index_t_3; // (tp_index+t)*3 + + // load input, output, bit values + p = z.NextIn; n = z.AvailableBytesIn; b = s.bitb; k = s.bitk; + q = s.writeAt; m = q < s.readAt ? s.readAt - q - 1 : s.end - q; + + // initialize masks + ml = InternalInflateConstants.InflateMask[bl]; + md = InternalInflateConstants.InflateMask[bd]; + + // do until not enough input or output space for fast loop + do + { + // assume called with m >= 258 && n >= 10 + // get literal/length code + while (k < (20)) + { + // max bits for literal/length code + n--; + b |= (z.InputBuffer[p++] & 0xff) << k; k += 8; + } + + t = b & ml; + tp = tl; + tp_index = tl_index; + tp_index_t_3 = (tp_index + t) * 3; + if ((e = tp[tp_index_t_3]) == 0) + { + b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]); + + s.window[q++] = (byte)tp[tp_index_t_3 + 2]; + m--; + continue; + } + do + { + + b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]); + + if ((e & 16) != 0) + { + e &= 15; + c = tp[tp_index_t_3 + 2] + ((int)b & InternalInflateConstants.InflateMask[e]); + + b >>= e; k -= e; + + // decode distance base of block to copy + while (k < 15) + { + // max bits for distance code + n--; + b |= (z.InputBuffer[p++] & 0xff) << k; k += 8; + } + + t = b & md; + tp = td; + tp_index = td_index; + tp_index_t_3 = (tp_index + t) * 3; + e = tp[tp_index_t_3]; + + do + { + + b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]); + + if ((e & 16) != 0) + { + // get extra bits to add to distance base + e &= 15; + while (k < e) + { + // get extra bits (up to 13) + n--; + b |= (z.InputBuffer[p++] & 0xff) << k; k += 8; + } + + d = tp[tp_index_t_3 + 2] + (b & InternalInflateConstants.InflateMask[e]); + + b >>= e; k -= e; + + // do the copy + m -= c; + if (q >= d) + { + // offset before dest + // just copy + r = q - d; + if (q - r > 0 && 2 > (q - r)) + { + s.window[q++] = s.window[r++]; // minimum count is three, + s.window[q++] = s.window[r++]; // so unroll loop a little + c -= 2; + } + else + { + Array.Copy(s.window, r, s.window, q, 2); + q += 2; r += 2; c -= 2; + } + } + else + { + // else offset after destination + r = q - d; + do + { + r += s.end; // force pointer in window + } + while (r < 0); // covers invalid distances + e = s.end - r; + if (c > e) + { + // if source crosses, + c -= e; // wrapped copy + if (q - r > 0 && e > (q - r)) + { + do + { + s.window[q++] = s.window[r++]; + } + while (--e != 0); + } + else + { + Array.Copy(s.window, r, s.window, q, e); + q += e; r += e; e = 0; + } + r = 0; // copy rest from start of window + } + } + + // copy all or what's left + if (q - r > 0 && c > (q - r)) + { + do + { + s.window[q++] = s.window[r++]; + } + while (--c != 0); + } + else + { + Array.Copy(s.window, r, s.window, q, c); + q += c; r += c; c = 0; + } + break; + } + else if ((e & 64) == 0) + { + t += tp[tp_index_t_3 + 2]; + t += (b & InternalInflateConstants.InflateMask[e]); + tp_index_t_3 = 
(tp_index + t) * 3; + e = tp[tp_index_t_3]; + } + else + { + z.Message = "invalid distance code"; + + c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3); + + s.bitb = b; s.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + s.writeAt = q; + + return ZlibConstants.Z_DATA_ERROR; + } + } + while (true); + break; + } + + if ((e & 64) == 0) + { + t += tp[tp_index_t_3 + 2]; + t += (b & InternalInflateConstants.InflateMask[e]); + tp_index_t_3 = (tp_index + t) * 3; + if ((e = tp[tp_index_t_3]) == 0) + { + b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]); + s.window[q++] = (byte)tp[tp_index_t_3 + 2]; + m--; + break; + } + } + else if ((e & 32) != 0) + { + c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3); + + s.bitb = b; s.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + s.writeAt = q; + + return ZlibConstants.Z_STREAM_END; + } + else + { + z.Message = "invalid literal/length code"; + + c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3); + + s.bitb = b; s.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + s.writeAt = q; + + return ZlibConstants.Z_DATA_ERROR; + } + } + while (true); + } + while (m >= 258 && n >= 10); + + // not enough input or output--restore pointers and return + c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3); + + s.bitb = b; s.bitk = k; + z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; + s.writeAt = q; + + return ZlibConstants.Z_OK; + } + } + + + internal sealed class InflateManager + { + // preset dictionary flag in zlib header + private const int PRESET_DICT = 0x20; + + private const int Z_DEFLATED = 8; + + private enum InflateManagerMode + { + METHOD = 0, // waiting for method byte + FLAG = 1, // waiting for flag byte + DICT4 = 2, // four dictionary check bytes to go + DICT3 = 3, // three dictionary check bytes to go + DICT2 = 4, // two dictionary check bytes to go + DICT1 = 5, // one dictionary check byte to go + DICT0 = 6, // waiting for inflateSetDictionary + BLOCKS = 7, // decompressing blocks + CHECK4 = 8, // four check bytes to go + CHECK3 = 9, // three check bytes to go + CHECK2 = 10, // two check bytes to go + CHECK1 = 11, // one check byte to go + DONE = 12, // finished check, done + BAD = 13, // got an error--stay here + } + + private InflateManagerMode mode; // current inflate mode + internal ZlibCodec _codec; // pointer back to this zlib stream + + // mode dependent information + internal int method; // if FLAGS, method byte + + // if CHECK, check values to compare + internal uint computedCheck; // computed check value + internal uint expectedCheck; // stream check value + + // if BAD, inflateSync's marker bytes count + internal int marker; + + // mode independent information + //internal int nowrap; // flag for no wrapper + private bool _handleRfc1950HeaderBytes = true; + internal bool HandleRfc1950HeaderBytes + { + get { return _handleRfc1950HeaderBytes; } + set { _handleRfc1950HeaderBytes = value; } + } + internal int wbits; // log2(window size) (8..15, defaults to 15) + + internal InflateBlocks blocks; // current inflate_blocks state + + public InflateManager() { } + + public InflateManager(bool expectRfc1950HeaderBytes) + { + _handleRfc1950HeaderBytes = expectRfc1950HeaderBytes; + } + + internal int Reset() + { + _codec.TotalBytesIn = _codec.TotalBytesOut = 0; + _codec.Message = 
null; + mode = HandleRfc1950HeaderBytes ? InflateManagerMode.METHOD : InflateManagerMode.BLOCKS; + blocks.Reset(); + return ZlibConstants.Z_OK; + } + + internal int End() + { + if (blocks != null) + blocks.Free(); + blocks = null; + return ZlibConstants.Z_OK; + } + + internal int Initialize(ZlibCodec codec, int w) + { + _codec = codec; + _codec.Message = null; + blocks = null; + + // handle undocumented nowrap option (no zlib header or check) + //nowrap = 0; + //if (w < 0) + //{ + // w = - w; + // nowrap = 1; + //} + + // set window size + if (w < 8 || w > 15) + { + End(); + throw new ZlibException("Bad window size."); + + //return ZlibConstants.Z_STREAM_ERROR; + } + wbits = w; + + blocks = new InflateBlocks(codec, + HandleRfc1950HeaderBytes ? this : null, + 1 << w); + + // reset state + Reset(); + return ZlibConstants.Z_OK; + } + + + internal int Inflate(FlushType flush) + { + int b; + + if (_codec.InputBuffer == null) + throw new ZlibException("InputBuffer is null. "); + +// int f = (flush == FlushType.Finish) +// ? ZlibConstants.Z_BUF_ERROR +// : ZlibConstants.Z_OK; + + // workitem 8870 + int f = ZlibConstants.Z_OK; + int r = ZlibConstants.Z_BUF_ERROR; + + while (true) + { + switch (mode) + { + case InflateManagerMode.METHOD: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; + _codec.TotalBytesIn++; + if (((method = _codec.InputBuffer[_codec.NextIn++]) & 0xf) != Z_DEFLATED) + { + mode = InflateManagerMode.BAD; + _codec.Message = String.Format("unknown compression method (0x{0:X2})", method); + marker = 5; // can't try inflateSync + break; + } + if ((method >> 4) + 8 > wbits) + { + mode = InflateManagerMode.BAD; + _codec.Message = String.Format("invalid window size ({0})", (method >> 4) + 8); + marker = 5; // can't try inflateSync + break; + } + mode = InflateManagerMode.FLAG; + break; + + + case InflateManagerMode.FLAG: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; + _codec.TotalBytesIn++; + b = (_codec.InputBuffer[_codec.NextIn++]) & 0xff; + + if ((((method << 8) + b) % 31) != 0) + { + mode = InflateManagerMode.BAD; + _codec.Message = "incorrect header check"; + marker = 5; // can't try inflateSync + break; + } + + mode = ((b & PRESET_DICT) == 0) + ? 
InflateManagerMode.BLOCKS + : InflateManagerMode.DICT4; + break; + + case InflateManagerMode.DICT4: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; + _codec.TotalBytesIn++; + expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000); + mode = InflateManagerMode.DICT3; + break; + + case InflateManagerMode.DICT3: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; + _codec.TotalBytesIn++; + expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000); + mode = InflateManagerMode.DICT2; + break; + + case InflateManagerMode.DICT2: + + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; + _codec.TotalBytesIn++; + expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00); + mode = InflateManagerMode.DICT1; + break; + + + case InflateManagerMode.DICT1: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; _codec.TotalBytesIn++; + expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff); + _codec._Adler32 = expectedCheck; + mode = InflateManagerMode.DICT0; + return ZlibConstants.Z_NEED_DICT; + + + case InflateManagerMode.DICT0: + mode = InflateManagerMode.BAD; + _codec.Message = "need dictionary"; + marker = 0; // can try inflateSync + return ZlibConstants.Z_STREAM_ERROR; + + + case InflateManagerMode.BLOCKS: + r = blocks.Process(r); + if (r == ZlibConstants.Z_DATA_ERROR) + { + mode = InflateManagerMode.BAD; + marker = 0; // can try inflateSync + break; + } + + if (r == ZlibConstants.Z_OK) r = f; + + if (r != ZlibConstants.Z_STREAM_END) + return r; + + r = f; + computedCheck = blocks.Reset(); + if (!HandleRfc1950HeaderBytes) + { + mode = InflateManagerMode.DONE; + return ZlibConstants.Z_STREAM_END; + } + mode = InflateManagerMode.CHECK4; + break; + + case InflateManagerMode.CHECK4: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; + _codec.TotalBytesIn++; + expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000); + mode = InflateManagerMode.CHECK3; + break; + + case InflateManagerMode.CHECK3: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; _codec.TotalBytesIn++; + expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000); + mode = InflateManagerMode.CHECK2; + break; + + case InflateManagerMode.CHECK2: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; + _codec.TotalBytesIn++; + expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00); + mode = InflateManagerMode.CHECK1; + break; + + case InflateManagerMode.CHECK1: + if (_codec.AvailableBytesIn == 0) return r; + r = f; + _codec.AvailableBytesIn--; _codec.TotalBytesIn++; + expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff); + if (computedCheck != expectedCheck) + { + mode = InflateManagerMode.BAD; + _codec.Message = "incorrect data check"; + marker = 5; // can't try inflateSync + break; + } + mode = InflateManagerMode.DONE; + return ZlibConstants.Z_STREAM_END; + + case InflateManagerMode.DONE: + return ZlibConstants.Z_STREAM_END; + + case InflateManagerMode.BAD: + throw new ZlibException(String.Format("Bad state ({0})", _codec.Message)); + + default: + throw new ZlibException("Stream error."); + + } + } + } + + + + internal int SetDictionary(byte[] dictionary) + { + int index = 0; + int length = dictionary.Length; + if (mode != 
InflateManagerMode.DICT0) + throw new ZlibException("Stream error."); + + if (Adler.Adler32(1, dictionary, 0, dictionary.Length) != _codec._Adler32) + { + return ZlibConstants.Z_DATA_ERROR; + } + + _codec._Adler32 = Adler.Adler32(0, null, 0, 0); + + if (length >= (1 << wbits)) + { + length = (1 << wbits) - 1; + index = dictionary.Length - length; + } + blocks.SetDictionary(dictionary, index, length); + mode = InflateManagerMode.BLOCKS; + return ZlibConstants.Z_OK; + } + + + private static readonly byte[] mark = new byte[] { 0, 0, 0xff, 0xff }; + + internal int Sync() + { + int n; // number of bytes to look at + int p; // pointer to bytes + int m; // number of marker bytes found in a row + long r, w; // temporaries to save total_in and total_out + + // set up + if (mode != InflateManagerMode.BAD) + { + mode = InflateManagerMode.BAD; + marker = 0; + } + if ((n = _codec.AvailableBytesIn) == 0) + return ZlibConstants.Z_BUF_ERROR; + p = _codec.NextIn; + m = marker; + + // search + while (n != 0 && m < 4) + { + if (_codec.InputBuffer[p] == mark[m]) + { + m++; + } + else if (_codec.InputBuffer[p] != 0) + { + m = 0; + } + else + { + m = 4 - m; + } + p++; n--; + } + + // restore + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + _codec.AvailableBytesIn = n; + marker = m; + + // return no joy or set up to restart on a new block + if (m != 4) + { + return ZlibConstants.Z_DATA_ERROR; + } + r = _codec.TotalBytesIn; + w = _codec.TotalBytesOut; + Reset(); + _codec.TotalBytesIn = r; + _codec.TotalBytesOut = w; + mode = InflateManagerMode.BLOCKS; + return ZlibConstants.Z_OK; + } + + + // Returns true if inflate is currently at the end of a block generated + // by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP + // implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH + // but removes the length bytes of the resulting empty stored block. When + // decompressing, PPP checks that at the end of input packet, inflate is + // waiting for these length bytes. + internal int SyncPoint(ZlibCodec z) + { + return blocks.SyncPoint(); + } + } +} \ No newline at end of file diff --git a/NBToolkit/NBToolkit/Zlib/LICENSE.txt b/NBToolkit/NBToolkit/Zlib/LICENSE.txt new file mode 100644 index 0000000..cdce500 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/LICENSE.txt @@ -0,0 +1,29 @@ +JZlib 0.0.* were released under the GNU LGPL license. Later, we have switched +over to a BSD-style license. + +------------------------------------------------------------------------------ +Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. + + 3. The names of the authors may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +INC. 
OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/NBToolkit/NBToolkit/Zlib/ParallelDeflateOutputStream.cs b/NBToolkit/NBToolkit/Zlib/ParallelDeflateOutputStream.cs new file mode 100644 index 0000000..5459de0 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/ParallelDeflateOutputStream.cs @@ -0,0 +1,1158 @@ +//#define Trace + +// ParallelDeflateOutputStream.cs +// ------------------------------------------------------------------ +// +// A DeflateStream that does compression only, it uses a +// divide-and-conquer approach with multiple threads to exploit multiple +// CPUs for the DEFLATE computation. +// +// last saved: +// Time-stamp: <2010-January-20 19:24:58> +// ------------------------------------------------------------------ +// +// Copyright (c) 2009-2010 by Dino Chiesa +// All rights reserved! +// +// ------------------------------------------------------------------ + +using System; +using System.Threading; +using Ionic.Zlib; +using System.IO; + + +namespace Ionic.Zlib +{ + internal class WorkItem + { + internal enum Status { None=0, Filling=1, Filled=2, Compressing=3, Compressed=4, Writing=5, Done=6 } + public byte[] buffer; + public byte[] compressed; + public int status; + public int crc; + public int index; + public int inputBytesAvailable; + public int compressedBytesAvailable; + public ZlibCodec compressor; + + public WorkItem(int size, Ionic.Zlib.CompressionLevel compressLevel, CompressionStrategy strategy) + { + buffer= new byte[size]; + // alloc 5 bytes overhead for every block (margin of safety= 2) + int n = size + ((size / 32768)+1) * 5 * 2; + compressed= new byte[n]; + + status = (int)Status.None; + compressor = new ZlibCodec(); + compressor.InitializeDeflate(compressLevel, false); + compressor.OutputBuffer = compressed; + compressor.InputBuffer = buffer; + } + } + + /// + /// A class for compressing and decompressing streams using the + /// Deflate algorithm with multiple threads. + /// + /// + /// + /// + /// This class is for compression only, and that can be only + /// through writing. + /// + /// + /// + /// For more information on the Deflate algorithm, see IETF RFC 1951, "DEFLATE + /// Compressed Data Format Specification version 1.3." + /// + /// + /// + /// This class is similar to , except + /// that this implementation uses an approach that employs multiple worker + /// threads to perform the DEFLATE. On a multi-cpu or multi-core computer, + /// the performance of this class can be significantly higher than the + /// single-threaded DeflateStream, particularly for larger streams. How + /// large? Anything over 10mb is a good candidate for parallel compression. + /// + /// + /// + /// The tradeoff is that this class uses more memory and more CPU than the + /// vanilla DeflateStream, and also is less efficient as a compressor. For + /// large files the size of the compressed data stream can be less than 1% + /// larger than the size of a compressed data stream from the vanialla + /// DeflateStream. For smaller files the difference can be larger. 
The + /// difference will also be larger if you set the BufferSize to be lower + /// than the default value. Your mileage may vary. Finally, for small + /// files, the ParallelDeflateOutputStream can be much slower than the vanilla + /// DeflateStream, because of the overhead of using the thread pool. + /// + /// + /// + /// + public class ParallelDeflateOutputStream : System.IO.Stream + { + + private static readonly int IO_BUFFER_SIZE_DEFAULT = 64 * 1024; // 128k + + private System.Collections.Generic.List _pool; + private bool _leaveOpen; + private System.IO.Stream _outStream; + private int _nextToFill, _nextToWrite; + private int _bufferSize = IO_BUFFER_SIZE_DEFAULT; + private ManualResetEvent _writingDone; + private ManualResetEvent _sessionReset; + private bool _noMoreInputForThisSegment; + private object _outputLock = new object(); + private bool _isClosed; + private bool _isDisposed; + private bool _firstWriteDone; + private int _pc; + private int _Crc32; + private Int64 _totalBytesProcessed; + private Ionic.Zlib.CompressionLevel _compressLevel; + private volatile Exception _pendingException; + private object _eLock = new Object(); // protects _pendingException + + // This bitfield is used only when Trace is defined. + //private TraceBits _DesiredTrace = TraceBits.Write | TraceBits.WriteBegin | + //TraceBits.WriteDone | TraceBits.Lifecycle | TraceBits.Fill | TraceBits.Flush | + //TraceBits.Session; + + //private TraceBits _DesiredTrace = TraceBits.WriteBegin | TraceBits.WriteDone | TraceBits.Synch | TraceBits.Lifecycle | TraceBits.Session ; + + private TraceBits _DesiredTrace = TraceBits.WriterThread | TraceBits.Synch | TraceBits.Lifecycle | TraceBits.Session ; + + /// + /// Create a ParallelDeflateOutputStream. + /// + /// + /// + /// + /// This stream compresses data written into it via the DEFLATE + /// algorithm (see RFC 1951), and writes out the compressed byte stream. + /// + /// + /// + /// The instance will use the default compression level, the default + /// buffer sizes and the default number of threads and buffers per + /// thread. + /// + /// + /// + /// This class is similar to , + /// except that this implementation uses an approach that employs + /// multiple worker threads to perform the DEFLATE. On a multi-cpu or + /// multi-core computer, the performance of this class can be + /// significantly higher than the single-threaded DeflateStream, + /// particularly for larger streams. How large? Anything over 10mb is + /// a good candidate for parallel compression. + /// + /// + /// + /// + /// + /// + /// This example shows how to use a ParallelDeflateOutputStream to compress + /// data. It reads a file, compresses it, and writes the compressed data to + /// a second, output file. 
+ /// + /// + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n= -1; + /// String outputFile = fileToCompress + ".compressed"; + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (var raw = System.IO.File.Create(outputFile)) + /// { + /// using (Stream compressor = new ParallelDeflateOutputStream(raw)) + /// { + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// compressor.Write(buffer, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Dim outputFile As String = (fileToCompress & ".compressed") + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using raw As FileStream = File.Create(outputFile) + /// Using compressor As Stream = New ParallelDeflateOutputStream(raw) + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// End Using + /// + /// + /// The stream to which compressed data will be written. + public ParallelDeflateOutputStream(System.IO.Stream stream) + : this(stream, CompressionLevel.Default, CompressionStrategy.Default, false) + { + } + + /// + /// Create a ParallelDeflateOutputStream using the specified CompressionLevel. + /// + /// + /// See the + /// constructor for example code. + /// + /// The stream to which compressed data will be written. + /// A tuning knob to trade speed for effectiveness. + public ParallelDeflateOutputStream(System.IO.Stream stream, CompressionLevel level) + : this(stream, level, CompressionStrategy.Default, false) + { + } + + /// + /// Create a ParallelDeflateOutputStream and specify whether to leave the captive stream open + /// when the ParallelDeflateOutputStream is closed. + /// + /// + /// See the + /// constructor for example code. + /// + /// The stream to which compressed data will be written. + /// + /// true if the application would like the stream to remain open after inflation/deflation. + /// + public ParallelDeflateOutputStream(System.IO.Stream stream, bool leaveOpen) + : this(stream, CompressionLevel.Default, CompressionStrategy.Default, leaveOpen) + { + } + + /// + /// Create a ParallelDeflateOutputStream and specify whether to leave the captive stream open + /// when the ParallelDeflateOutputStream is closed. + /// + /// + /// See the + /// constructor for example code. + /// + /// The stream to which compressed data will be written. + /// A tuning knob to trade speed for effectiveness. + /// + /// true if the application would like the stream to remain open after inflation/deflation. + /// + public ParallelDeflateOutputStream(System.IO.Stream stream, CompressionLevel level, bool leaveOpen) + : this(stream, CompressionLevel.Default, CompressionStrategy.Default, leaveOpen) + { + } + + /// + /// Create a ParallelDeflateOutputStream using the specified + /// CompressionLevel and CompressionStrategy, and specifying whether to + /// leave the captive stream open when the ParallelDeflateOutputStream is + /// closed. + /// + /// + /// See the + /// constructor for example code. + /// + /// The stream to which compressed data will be written. + /// A tuning knob to trade speed for effectiveness. + /// + /// By tweaking this parameter, you may be able to optimize the compression for + /// data with particular characteristics. + /// + /// + /// true if the application would like the stream to remain open after inflation/deflation. 
+ /// + public ParallelDeflateOutputStream(System.IO.Stream stream, + CompressionLevel level, + CompressionStrategy strategy, + bool leaveOpen) + { + TraceOutput(TraceBits.Lifecycle | TraceBits.Session, "-------------------------------------------------------"); + TraceOutput(TraceBits.Lifecycle | TraceBits.Session, "Create {0:X8}", this.GetHashCode()); + _compressLevel= level; + _leaveOpen = leaveOpen; + Strategy = strategy; + + BuffersPerCore = 4; // default + + _writingDone = new ManualResetEvent(false); + _sessionReset = new ManualResetEvent(false); + + _outStream = stream; + } + + + /// + /// The ZLIB strategy to be used during compression. + /// + /// + public CompressionStrategy Strategy + { + get; + private set; + } + + /// + /// The number of buffers per CPU or CPU core. + /// + /// + /// + /// + /// This property sets the number of memory buffers to create, for every + /// CPU or CPU core in the machine. The divide-and-conquer approach + /// taken by this class assumes a single thread from the application + /// will call Write(). There will be multiple background threads that + /// then compress (DEFLATE) the data written into the stream, and also a + /// single output thread, also operating in the background, aggregating + /// those results and finally emitting the output. + /// + /// + /// + /// The default value is 4. Different values may deliver better or + /// worse results, depending on the dynamic performance characteristics + /// of your storage and compute resources. + /// + /// + /// + /// The total amount of storage space allocated for buffering will be + /// (n*M*S*2), where n is the number of CPUs, M is the multiple (this + /// property), S is the size of each buffer (), + /// and there are 2 buffers used by the compressor, one for input and + /// one for output. For example, if your machine has 4 cores, and you + /// set BuffersPerCore to 3, and you retain the default buffer size of + /// 128k, then the ParallelDeflateOutputStream will use 3mb of buffer + /// memory in total. + /// + /// + /// + /// The application can set this value at any time, but it is effective + /// only before the first call to Write(), which is when the buffers are + /// allocated. + /// + /// + public int BuffersPerCore + { + get; set; + } + + /// + /// The size of the buffers used by the compressor threads. + /// + /// + /// + /// + /// The default buffer size is 128k. The application can set this value at any + /// time, but it is effective only before the first Write(). + /// + /// + /// + /// Larger buffer sizes implies larger memory consumption but allows + /// more efficient compression. Using smaller buffer sizes consumes less + /// memory but result in less effective compression. For example, using + /// the default buffer size of 128k, the compression delivered is within + /// 1% of the compression delivered by the single-threaded . On the other hand, using a + /// BufferSize of 8k can result in a compressed data stream that is 5% + /// larger than that delivered by the single-threaded + /// DeflateStream. Excessively small buffer sizes can also cause + /// the speed of the ParallelDeflateOutputStream to drop, because of + /// larger thread scheduling overhead dealing with many many small + /// buffers. 
+ /// + /// + /// + /// The total amount of storage space allocated for buffering will be + /// (n*M*S*2), where n is the number of CPUs, M is the multiple (), S is the size of each buffer (this + /// property), and there are 2 buffers used by the compressor, one for + /// input and one for output. For example, if your machine has a total + /// of 4 cores, and if you set to 3, and + /// you keep the default buffer size of 128k, then the + /// ParallelDeflateOutputStream will use 3mb of buffer memory in + /// total. + /// + /// + /// + public int BufferSize + { + get { return _bufferSize;} + set + { + if (value < 1024) + throw new ArgumentException(); + _bufferSize = value; + } + } + + /// + /// The CRC32 for the data that was written out, prior to compression. + /// + /// + /// This value is meaningful only after a call to Close(). + /// + public int Crc32 { get { return _Crc32; } } + + + /// + /// The total number of uncompressed bytes processed by the ParallelDeflateOutputStream. + /// + /// + /// This value is meaningful only after a call to Close(). + /// + public Int64 BytesProcessed { get { return _totalBytesProcessed; } } + + + private void _InitializePoolOfWorkItems() + { + _pool = new System.Collections.Generic.List(); + for(int i=0; i < BuffersPerCore * Environment.ProcessorCount; i++) + _pool.Add(new WorkItem(_bufferSize, _compressLevel, Strategy)); + _pc = _pool.Count; + + for(int i=0; i < _pc; i++) + _pool[i].index= i; + + // set the pointers + _nextToFill= _nextToWrite= 0; + } + + + private void _KickoffWriter() + { + if (!ThreadPool.QueueUserWorkItem(new WaitCallback(this._PerpetualWriterMethod))) + throw new Exception("Cannot enqueue writer thread."); + } + + + /// + /// Write data to the stream. + /// + /// + /// + /// + /// + /// To use the ParallelDeflateOutputStream to compress data, create a + /// ParallelDeflateOutputStream with CompressionMode.Compress, passing a + /// writable output stream. Then call Write() on that + /// ParallelDeflateOutputStream, providing uncompressed data as input. The + /// data sent to the output stream will be the compressed form of the data + /// written. + /// + /// + /// + /// To decompress data, use the class. + /// + /// + /// + /// The buffer holding data to write to the stream. + /// the offset within that data array to find the first byte to write. + /// the number of bytes to write. + public override void Write(byte[] buffer, int offset, int count) + { + // Fill a work buffer; when full, flip state to 'Filled' + + if (_isClosed) + throw new NotSupportedException(); + + // dispense any exceptions that occurred on the BG threads + if (_pendingException != null) + throw _pendingException; + + if (count == 0) return; + + if (!_firstWriteDone) + { + // Want to do this on first Write, first session, and not in the + // constructor. We want to allow the BufferSize and BuffersPerCore to + // change after construction, but before first Write. + _InitializePoolOfWorkItems(); + + // Only do this once (ever), the first time Write() is called: + _KickoffWriter(); + + // Release the writer thread. + TraceOutput(TraceBits.Synch, "Synch _sessionReset.Set() Write (first)"); + _sessionReset.Set(); + + _firstWriteDone = true; + } + + + do + { + int ix = _nextToFill % _pc; + WorkItem workitem = _pool[ix]; + lock(workitem) + { + TraceOutput(TraceBits.Fill, + "Fill lock wi({0}) stat({1}) iba({2}) nf({3})", + workitem.index, + workitem.status, + workitem.inputBytesAvailable, + _nextToFill + ); + + // If the status is what we want, then use the workitem. 
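                    // A WorkItem moves through the Status values declared on the
                    // WorkItem class: Write() fills a None/Done buffer (Filling)
                    // until it is full (Filled) and queues it to _DeflateOne, which
                    // is expected to take it through Compressing to Compressed; the
                    // writer thread then emits it (Writing) and marks it Done so it
                    // can be refilled here. Only the three reusable states are
                    // accepted by the test below.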
+ if (workitem.status == (int)WorkItem.Status.None || + workitem.status == (int)WorkItem.Status.Done || + workitem.status == (int)WorkItem.Status.Filling) + { + workitem.status = (int)WorkItem.Status.Filling; + int limit = ((workitem.buffer.Length - workitem.inputBytesAvailable) > count) + ? count + : (workitem.buffer.Length - workitem.inputBytesAvailable); + + // copy from the provided buffer to our workitem, starting at + // the tail end of whatever data we might have in there currently. + Array.Copy(buffer, offset, workitem.buffer, workitem.inputBytesAvailable, limit); + + count -= limit; + offset += limit; + workitem.inputBytesAvailable += limit; + if (workitem.inputBytesAvailable==workitem.buffer.Length) + { + workitem.status = (int)WorkItem.Status.Filled; + // No need for interlocked.increment: the Write() method + // is documented as not multi-thread safe, so we can assume Write() + // calls come in from only one thread. + _nextToFill++; + + TraceOutput(TraceBits.Fill, + "Fill QUWI wi({0}) stat({1}) iba({2}) nf({3})", + workitem.index, + workitem.status, + workitem.inputBytesAvailable, + _nextToFill + ); + + if (!ThreadPool.QueueUserWorkItem( _DeflateOne, workitem )) + throw new Exception("Cannot enqueue workitem"); + } + + } + else + { + int wcycles= 0; + + while (workitem.status != (int)WorkItem.Status.None && + workitem.status != (int)WorkItem.Status.Done && + workitem.status != (int)WorkItem.Status.Filling) + { + TraceOutput(TraceBits.Fill, + "Fill waiting wi({0}) stat({1}) nf({2})", + workitem.index, + workitem.status, + _nextToFill); + + wcycles++; + + Monitor.Pulse(workitem); + Monitor.Wait(workitem); + + if (workitem.status == (int)WorkItem.Status.None || + workitem.status == (int)WorkItem.Status.Done || + workitem.status == (int)WorkItem.Status.Filling) + TraceOutput(TraceBits.Fill, + "Fill A-OK wi({0}) stat({1}) iba({2}) cyc({3})", + workitem.index, + workitem.status, + workitem.inputBytesAvailable, + wcycles); + } + } + } + } + while (count > 0); // until no more to write + + return; + } + + + + /// + /// Flush the stream. + /// + public override void Flush() + { + _Flush(false); + } + + + private void _Flush(bool lastInput) + { + if (_isClosed) + throw new NotSupportedException(); + + // pass any partial buffer out to the compressor workers: + WorkItem workitem = _pool[_nextToFill % _pc]; + lock(workitem) + { + if ( workitem.status == (int)WorkItem.Status.Filling) + { + workitem.status = (int)WorkItem.Status.Filled; + _nextToFill++; + + // When flush is called from Close(), we set _noMore. + // can't do it before updating nextToFill, though. + if (lastInput) + _noMoreInputForThisSegment= true; + + TraceOutput(TraceBits.Flush, + "Flush filled wi({0}) iba({1}) nf({2}) nomore({3})", + workitem.index, workitem.inputBytesAvailable, _nextToFill, _noMoreInputForThisSegment); + + if (!ThreadPool.QueueUserWorkItem( _DeflateOne, workitem )) + throw new Exception("Cannot enqueue workitem"); + + //Monitor.Pulse(workitem); + } + else + { + // When flush is called from Close(), we set _noMore. + // Gotta do this whether or not there is another packet to send along. + if (lastInput) + _noMoreInputForThisSegment= true; + + TraceOutput(TraceBits.Flush, + "Flush noaction wi({0}) stat({1}) nf({2}) nomore({3})", + workitem.index, workitem.status, _nextToFill, _noMoreInputForThisSegment); + } + } + } + + + /// + /// Close the stream. 
+ /// + /// + /// You must call Close on the stream to guarantee that all of the data written in has + /// been compressed, and the compressed data has been written out. + /// + public override void Close() + { + TraceOutput(TraceBits.Session, "Close {0:X8}", this.GetHashCode()); + + if (_isClosed) return; + + _Flush(true); + + //System.Diagnostics.StackTrace st = new System.Diagnostics.StackTrace(1); + //System.Console.WriteLine(st.ToString()); + + // need to get Writer off the workitem, in case he's waiting forever + WorkItem workitem = _pool[_nextToFill % _pc]; + lock(workitem) + { + Monitor.PulseAll(workitem); + } + + // wait for the writer to complete his work + TraceOutput(TraceBits.Synch, "Synch _writingDone.WaitOne(begin) Close"); + _writingDone.WaitOne(); + TraceOutput(TraceBits.Synch, "Synch _writingDone.WaitOne(done) Close"); + + TraceOutput(TraceBits.Session, "-------------------------------------------------------"); + if (!_leaveOpen) + _outStream.Close(); + + _isClosed= true; + } + + + +// /// The destructor +// ~ParallelDeflateOutputStream() +// { +// TraceOutput(TraceBits.Lifecycle, "Destructor {0:X8}", this.GetHashCode()); +// // call Dispose with false. Since we're in the +// // destructor call, the managed resources will be +// // disposed of anyways. +// Dispose(false); +// } + + + + // workitem 10030 - implement a new Dispose method + + /// Dispose the object + /// + /// + /// Because ParallelDeflateOutputStream is IDisposable, the + /// application must call this method when finished using the instance. + /// + /// + /// This method is generally called implicitly upon exit from + /// a using scope in C# (Using in VB). + /// + /// + new public void Dispose() + { + TraceOutput(TraceBits.Lifecycle, "Dispose {0:X8}", this.GetHashCode()); + _isDisposed= true; + _pool = null; + TraceOutput(TraceBits.Synch, "Synch _sessionReset.Set() Dispose"); + _sessionReset.Set(); // tell writer to die + Dispose(true); + } + + + + /// The Dispose method + protected override void Dispose(bool disposeManagedResources) + { + if (disposeManagedResources) + { + // dispose managed resources + _writingDone.Close(); + _sessionReset.Close(); + } + } + + + /// + /// Resets the stream for use with another stream. + /// + /// + /// Because the ParallelDeflateOutputStream is expensive to create, it + /// has been designed so that it can be recycled and re-used. You have + /// to call Close() on the stream first, then you can call Reset() on + /// it, to use it again on another stream. 
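+ ///
+ /// (The example below assumes a read buffer and byte count declared before
+ /// the loop, for instance: byte[] buffer = new byte[32768]; int n;)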
+ /// + /// + /// + /// + /// ParallelDeflateOutputStream deflater = null; + /// foreach (var inputFile in listOfFiles) + /// { + /// string outputFile = inputFile + ".compressed"; + /// using (System.IO.Stream input = System.IO.File.OpenRead(inputFile)) + /// { + /// using (var outStream = System.IO.File.Create(outputFile)) + /// { + /// if (deflater == null) + /// deflater = new ParallelDeflateOutputStream(outStream, + /// CompressionLevel.Best, + /// CompressionStrategy.Default, + /// true); + /// deflater.Reset(outStream); + /// + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// deflater.Write(buffer, 0, n); + /// } + /// } + /// } + /// } + /// + /// + public void Reset(Stream stream) + { + TraceOutput(TraceBits.Session, "-------------------------------------------------------"); + TraceOutput(TraceBits.Session, "Reset {0:X8} firstDone({1})", this.GetHashCode(), _firstWriteDone); + + if (!_firstWriteDone) return; + + if (_noMoreInputForThisSegment) + { + // wait til done writing: + TraceOutput(TraceBits.Synch, "Synch _writingDone.WaitOne(begin) Reset"); + _writingDone.WaitOne(); + TraceOutput(TraceBits.Synch, "Synch _writingDone.WaitOne(done) Reset"); + + // reset all status + foreach (var workitem in _pool) + workitem.status = (int) WorkItem.Status.None; + + _noMoreInputForThisSegment= false; + _nextToFill= _nextToWrite= 0; + _totalBytesProcessed = 0L; + _Crc32= 0; + _isClosed= false; + + TraceOutput(TraceBits.Synch, "Synch _writingDone.Reset() Reset"); + _writingDone.Reset(); + } + else + { + TraceOutput(TraceBits.Synch, "Synch Reset noMore=false"); + } + + _outStream = stream; + + // release the writer thread for the next "session" + TraceOutput(TraceBits.Synch, "Synch _sessionReset.Set() Reset"); + _sessionReset.Set(); + } + + + + private void _PerpetualWriterMethod(object state) + { + TraceOutput(TraceBits.WriterThread, "_PerpetualWriterMethod START"); + + try + { + do + { + // wait for the next session + TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.WaitOne(begin) PWM"); + _sessionReset.WaitOne(); + TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.WaitOne(done) PWM"); + + if (_isDisposed) break; + + TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.Reset() PWM"); + _sessionReset.Reset(); + + // repeatedly write buffers as they become ready + WorkItem workitem = null; + Ionic.Zlib.CRC32 c= new Ionic.Zlib.CRC32(); + do + { + workitem = _pool[_nextToWrite % _pc]; + lock(workitem) + { + if (_noMoreInputForThisSegment) + TraceOutput(TraceBits.Write, + "Write drain wi({0}) stat({1}) canuse({2}) cba({3})", + workitem.index, + workitem.status, + (workitem.status == (int)WorkItem.Status.Compressed), + workitem.compressedBytesAvailable); + + do + { + if (workitem.status == (int)WorkItem.Status.Compressed) + { + TraceOutput(TraceBits.WriteBegin, + "Write begin wi({0}) stat({1}) cba({2})", + workitem.index, + workitem.status, + workitem.compressedBytesAvailable); + + workitem.status = (int)WorkItem.Status.Writing; + _outStream.Write(workitem.compressed, 0, workitem.compressedBytesAvailable); + c.Combine(workitem.crc, workitem.inputBytesAvailable); + _totalBytesProcessed += workitem.inputBytesAvailable; + _nextToWrite++; + workitem.inputBytesAvailable= 0; + workitem.status = (int)WorkItem.Status.Done; + + TraceOutput(TraceBits.WriteDone, + "Write done wi({0}) stat({1}) cba({2})", + workitem.index, + workitem.status, + workitem.compressedBytesAvailable); + + + Monitor.Pulse(workitem); + 
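+ // wake the Write()/Flush() side in case it is blocked waiting for
+ // this buffer slot to be released, then advance to the next work
+ // item in sequence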
break; + } + else + { + int wcycles = 0; + // I've locked a workitem I cannot use. + // Therefore, wake someone else up, and then release the lock. + while (workitem.status != (int)WorkItem.Status.Compressed) + { + TraceOutput(TraceBits.WriteWait, + "Write waiting wi({0}) stat({1}) nw({2}) nf({3}) nomore({4})", + workitem.index, + workitem.status, + _nextToWrite, _nextToFill, + _noMoreInputForThisSegment ); + + if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill) + break; + + wcycles++; + + // wake up someone else + Monitor.Pulse(workitem); + // release and wait + Monitor.Wait(workitem); + + if (workitem.status == (int)WorkItem.Status.Compressed) + TraceOutput(TraceBits.WriteWait, + "Write A-OK wi({0}) stat({1}) iba({2}) cba({3}) cyc({4})", + workitem.index, + workitem.status, + workitem.inputBytesAvailable, + workitem.compressedBytesAvailable, + wcycles); + } + + if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill) + break; + + } + } + while (true); + } + + if (_noMoreInputForThisSegment) + TraceOutput(TraceBits.Write, + "Write nomore nw({0}) nf({1}) break({2})", + _nextToWrite, _nextToFill, (_nextToWrite == _nextToFill)); + + if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill) + break; + + } while (true); + + + // Finish: + // After writing a series of buffers, closing each one with + // Flush.Sync, we now write the final one as Flush.Finish, and + // then stop. + byte[] buffer = new byte[128]; + ZlibCodec compressor = new ZlibCodec(); + int rc = compressor.InitializeDeflate(_compressLevel, false); + compressor.InputBuffer = null; + compressor.NextIn = 0; + compressor.AvailableBytesIn = 0; + compressor.OutputBuffer = buffer; + compressor.NextOut = 0; + compressor.AvailableBytesOut = buffer.Length; + rc = compressor.Deflate(FlushType.Finish); + + if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) + throw new Exception("deflating: " + compressor.Message); + + if (buffer.Length - compressor.AvailableBytesOut > 0) + { + TraceOutput(TraceBits.WriteBegin, + "Write begin flush bytes({0})", + buffer.Length - compressor.AvailableBytesOut); + + _outStream.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut); + + TraceOutput(TraceBits.WriteBegin, + "Write done flush"); + } + + compressor.EndDeflate(); + + _Crc32 = c.Crc32Result; + + // signal that writing is complete: + TraceOutput(TraceBits.Synch, "Synch _writingDone.Set() PWM"); + _writingDone.Set(); + } + while (true); + } + catch (System.Exception exc1) + { + lock(_eLock) + { + // expose the exception to the main thread + if (_pendingException!=null) + _pendingException = exc1; + } + } + + TraceOutput(TraceBits.WriterThread, "_PerpetualWriterMethod FINIS"); + } + + + + + private void _DeflateOne(Object wi) + { + WorkItem workitem = (WorkItem) wi; + try + { + // compress one buffer + int myItem = workitem.index; + + lock(workitem) + { + if (workitem.status != (int)WorkItem.Status.Filled) + throw new InvalidOperationException(); + + Ionic.Zlib.CRC32 crc = new CRC32(); + + // use the workitem: + // calc CRC on the buffer + crc.SlurpBlock(workitem.buffer, 0, workitem.inputBytesAvailable); + + // deflate it + DeflateOneSegment(workitem); + + // update status + workitem.status = (int)WorkItem.Status.Compressed; + workitem.crc = crc.Crc32Result; + + TraceOutput(TraceBits.Compress, + "Compress wi({0}) stat({1}) len({2})", + workitem.index, + workitem.status, + workitem.compressedBytesAvailable + ); + + // release the item + Monitor.Pulse(workitem); + } + } + catch (System.Exception exc1) + { + 
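+ // The failure happened on a ThreadPool thread; record it so that the
+ // foreground thread can rethrow it on its next call into Write().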
lock(_eLock) + { + // expose the exception to the main thread + if (_pendingException!=null) + _pendingException = exc1; + } + } + } + + + + + private bool DeflateOneSegment(WorkItem workitem) + { + ZlibCodec compressor = workitem.compressor; + int rc= 0; + compressor.ResetDeflate(); + compressor.NextIn = 0; + + compressor.AvailableBytesIn = workitem.inputBytesAvailable; + + // step 1: deflate the buffer + compressor.NextOut = 0; + compressor.AvailableBytesOut = workitem.compressed.Length; + do + { + compressor.Deflate(FlushType.None); + } + while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0); + + // step 2: flush (sync) + rc = compressor.Deflate(FlushType.Sync); + + workitem.compressedBytesAvailable= (int) compressor.TotalBytesOut; + return true; + } + + + [System.Diagnostics.ConditionalAttribute("Trace")] + private void TraceOutput(TraceBits bits, string format, params object[] varParams) + { + if ((bits & _DesiredTrace) != 0) + { + lock(_outputLock) + { + int tid = Thread.CurrentThread.GetHashCode(); + Console.ForegroundColor = (ConsoleColor) (tid % 8 + 8); + Console.Write("{0:000} PDOS ", tid); + Console.WriteLine(format, varParams); + Console.ResetColor(); + } + } + } + + + // used only when Trace is defined + [Flags] + enum TraceBits + { + None = 0, + Write = 1, // write out + WriteBegin = 2, // begin to write out + WriteDone = 4, // done writing out + WriteWait = 8, // write thread waiting for buffer + Flush = 16, + Compress = 32, // async compress + Fill = 64, // filling buffers, when caller invokes Write() + Lifecycle = 128, // constructor/disposer + Session = 256, // Close/Reset + Synch = 512, // thread synchronization + WriterThread = 1024, // writer thread + } + + + + /// + /// Indicates whether the stream supports Seek operations. + /// + /// + /// Always returns false. + /// + public override bool CanSeek + { + get { return false; } + } + + + /// + /// Indicates whether the stream supports Read operations. + /// + /// + /// Always returns false. + /// + public override bool CanRead + { + get {return false;} + } + + /// + /// Indicates whether the stream supports Write operations. + /// + /// + /// Returns true if the provided stream is writable. + /// + public override bool CanWrite + { + get { return _outStream.CanWrite; } + } + + /// + /// Reading this property always throws a NotImplementedException. + /// + public override long Length + { + get { throw new NotImplementedException(); } + } + + /// + /// Reading or Writing this property always throws a NotImplementedException. + /// + public override long Position + { + get { throw new NotImplementedException(); } + set { throw new NotImplementedException(); } + } + + /// + /// This method always throws a NotImplementedException. + /// + public override int Read(byte[] buffer, int offset, int count) + { + throw new NotImplementedException(); + } + + /// + /// This method always throws a NotImplementedException. + /// + public override long Seek(long offset, System.IO.SeekOrigin origin) + { + throw new NotImplementedException(); + } + + /// + /// This method always throws a NotImplementedException. 
+ /// + public override void SetLength(long value) + { + throw new NotImplementedException(); + } + + } + +} + + diff --git a/NBToolkit/NBToolkit/Zlib/Tree.cs b/NBToolkit/NBToolkit/Zlib/Tree.cs new file mode 100644 index 0000000..954872a --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/Tree.cs @@ -0,0 +1,423 @@ +// Tree.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-October-28 13:29:50> +// +// ------------------------------------------------------------------ +// +// This module defines classes for zlib compression and +// decompression. This code is derived from the jzlib implementation of +// zlib. In keeping with the license for jzlib, the copyright to that +// code is below. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. 
+// +// ----------------------------------------------------------------------- + + +using System; + +namespace Ionic.Zlib +{ + sealed class Tree + { + private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1); + + // extra bits for each length code + internal static readonly int[] ExtraLengthBits = new int[] + { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 + }; + + // extra bits for each distance code + internal static readonly int[] ExtraDistanceBits = new int[] + { + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 + }; + + // extra bits for each bit length code + internal static readonly int[] extra_blbits = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7}; + + internal static readonly sbyte[] bl_order = new sbyte[]{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + + + // The lengths of the bit length codes are sent in order of decreasing + // probability, to avoid transmitting the lengths for unused bit + // length codes. + + internal const int Buf_size = 8 * 2; + + // see definition of array dist_code below + //internal const int DIST_CODE_LEN = 512; + + private static readonly sbyte[] _dist_code = new sbyte[] + { + 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 + }; + + internal static readonly sbyte[] LengthCode = new sbyte[] + { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 
15, + 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, + 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 + }; + + + internal static readonly int[] LengthBase = new int[] + { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, + 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0 + }; + + + internal static readonly int[] DistanceBase = new int[] + { + 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, + 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576 + }; + + + /// + /// Map from a distance to a distance code. + /// + /// + /// No side effects. _dist_code[256] and _dist_code[257] are never used. + /// + internal static int DistanceCode(int dist) + { + return (dist < 256) + ? _dist_code[dist] + : _dist_code[256 + SharedUtils.URShift(dist, 7)]; + } + + internal short[] dyn_tree; // the dynamic tree + internal int max_code; // largest code with non zero frequency + internal StaticTree staticTree; // the corresponding static tree + + // Compute the optimal bit lengths for a tree and update the total bit length + // for the current block. + // IN assertion: the fields freq and dad are set, heap[heap_max] and + // above are the tree nodes sorted by increasing frequency. + // OUT assertions: the field len is set to the optimal bit length, the + // array bl_count contains the frequencies for each bit length. + // The length opt_len is updated; static_len is also updated if stree is + // not null. + internal void gen_bitlen(DeflateManager s) + { + short[] tree = dyn_tree; + short[] stree = staticTree.treeCodes; + int[] extra = staticTree.extraBits; + int base_Renamed = staticTree.extraBase; + int max_length = staticTree.maxLength; + int h; // heap index + int n, m; // iterate over the tree elements + int bits; // bit length + int xbits; // extra bits + short f; // frequency + int overflow = 0; // number of elements with bit length too large + + for (bits = 0; bits <= InternalConstants.MAX_BITS; bits++) + s.bl_count[bits] = 0; + + // In a first pass, compute the optimal bit lengths (which may + // overflow in the case of the bit length tree). 
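+ // Each tree element occupies two adjacent shorts: tree[n*2] holds the
+ // frequency (later overwritten with the code), tree[n*2+1] holds the
+ // parent index (later overwritten with the bit length), the flattened
+ // layout inherited from zlib/jzlib.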
+ tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap + + for (h = s.heap_max + 1; h < HEAP_SIZE; h++) + { + n = s.heap[h]; + bits = tree[tree[n * 2 + 1] * 2 + 1] + 1; + if (bits > max_length) + { + bits = max_length; overflow++; + } + tree[n * 2 + 1] = (short) bits; + // We overwrite tree[n*2+1] which is no longer needed + + if (n > max_code) + continue; // not a leaf node + + s.bl_count[bits]++; + xbits = 0; + if (n >= base_Renamed) + xbits = extra[n - base_Renamed]; + f = tree[n * 2]; + s.opt_len += f * (bits + xbits); + if (stree != null) + s.static_len += f * (stree[n * 2 + 1] + xbits); + } + if (overflow == 0) + return ; + + // This happens for example on obj2 and pic of the Calgary corpus + // Find the first bit length which could increase: + do + { + bits = max_length - 1; + while (s.bl_count[bits] == 0) + bits--; + s.bl_count[bits]--; // move one leaf down the tree + s.bl_count[bits + 1] = (short) (s.bl_count[bits + 1] + 2); // move one overflow item as its brother + s.bl_count[max_length]--; + // The brother of the overflow item also moves one step up, + // but this does not affect bl_count[max_length] + overflow -= 2; + } + while (overflow > 0); + + for (bits = max_length; bits != 0; bits--) + { + n = s.bl_count[bits]; + while (n != 0) + { + m = s.heap[--h]; + if (m > max_code) + continue; + if (tree[m * 2 + 1] != bits) + { + s.opt_len = (int) (s.opt_len + ((long) bits - (long) tree[m * 2 + 1]) * (long) tree[m * 2]); + tree[m * 2 + 1] = (short) bits; + } + n--; + } + } + } + + // Construct one Huffman tree and assigns the code bit strings and lengths. + // Update the total bit length for the current block. + // IN assertion: the field freq is set for all tree elements. + // OUT assertions: the fields len and code are set to the optimal bit length + // and corresponding code. The length opt_len is updated; static_len is + // also updated if stree is not null. The field max_code is set. + internal void build_tree(DeflateManager s) + { + short[] tree = dyn_tree; + short[] stree = staticTree.treeCodes; + int elems = staticTree.elems; + int n, m; // iterate over heap elements + int max_code = -1; // largest code with non zero frequency + int node; // new node being created + + // Construct the initial heap, with least frequent element in + // heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + // heap[0] is not used. + s.heap_len = 0; + s.heap_max = HEAP_SIZE; + + for (n = 0; n < elems; n++) + { + if (tree[n * 2] != 0) + { + s.heap[++s.heap_len] = max_code = n; + s.depth[n] = 0; + } + else + { + tree[n * 2 + 1] = 0; + } + } + + // The pkzip format requires that at least one distance code exists, + // and that at least one bit should be sent even if there is only one + // possible code. So to avoid special checks later on we force at least + // two codes of non zero frequency. + while (s.heap_len < 2) + { + node = s.heap[++s.heap_len] = (max_code < 2?++max_code:0); + tree[node * 2] = 1; + s.depth[node] = 0; + s.opt_len--; + if (stree != null) + s.static_len -= stree[node * 2 + 1]; + // node is 0 or 1 so it does not have extra bits + } + this.max_code = max_code; + + // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + // establish sub-heaps of increasing lengths: + + for (n = s.heap_len / 2; n >= 1; n--) + s.pqdownheap(tree, n); + + // Construct the Huffman tree by repeatedly combining the least two + // frequent nodes. 
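+ // The two nodes removed at each step are parked at the high end of
+ // s.heap (via --heap_max) in frequency order, which is exactly the
+ // ordering gen_bitlen() walks afterwards.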
+ + node = elems; // next internal node of the tree + do + { + // n = node of least frequency + n = s.heap[1]; + s.heap[1] = s.heap[s.heap_len--]; + s.pqdownheap(tree, 1); + m = s.heap[1]; // m = node of next least frequency + + s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency + s.heap[--s.heap_max] = m; + + // Create a new node father of n and m + tree[node * 2] = unchecked((short) (tree[n * 2] + tree[m * 2])); + s.depth[node] = (sbyte) (System.Math.Max((byte) s.depth[n], (byte) s.depth[m]) + 1); + tree[n * 2 + 1] = tree[m * 2 + 1] = (short) node; + + // and insert the new node in the heap + s.heap[1] = node++; + s.pqdownheap(tree, 1); + } + while (s.heap_len >= 2); + + s.heap[--s.heap_max] = s.heap[1]; + + // At this point, the fields freq and dad are set. We can now + // generate the bit lengths. + + gen_bitlen(s); + + // The field len is now set, we can generate the bit codes + gen_codes(tree, max_code, s.bl_count); + } + + // Generate the codes for a given tree and bit counts (which need not be + // optimal). + // IN assertion: the array bl_count contains the bit length statistics for + // the given tree and the field len is set for all tree elements. + // OUT assertion: the field code is set for all tree elements of non + // zero code length. + internal static void gen_codes(short[] tree, int max_code, short[] bl_count) + { + short[] next_code = new short[InternalConstants.MAX_BITS + 1]; // next code value for each bit length + short code = 0; // running code value + int bits; // bit index + int n; // code index + + // The distribution counts are first used to generate the code values + // without bit reversal. + for (bits = 1; bits <= InternalConstants.MAX_BITS; bits++) + unchecked { + next_code[bits] = code = (short) ((code + bl_count[bits - 1]) << 1); + } + + // Check that the bit counts in bl_count are consistent. The last code + // must be all ones. + //Assert (code + bl_count[MAX_BITS]-1 == (1<>= 1; //SharedUtils.URShift(code, 1); + res <<= 1; + } + while (--len > 0); + return res >> 1; + } + } +} \ No newline at end of file diff --git a/NBToolkit/NBToolkit/Zlib/Zlib.cs b/NBToolkit/NBToolkit/Zlib/Zlib.cs new file mode 100644 index 0000000..d7cb5e1 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/Zlib.cs @@ -0,0 +1,456 @@ +// Zlib.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-November-07 05:26:55> +// +// ------------------------------------------------------------------ +// +// This module defines classes for ZLIB compression and +// decompression. This code is derived from the jzlib implementation of +// zlib, but significantly modified. The object model is not the same, +// and many of the behaviors are new or different. Nonetheless, in +// keeping with the license for jzlib, the copyright to that code is +// included below. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + + + +using System; +using Interop=System.Runtime.InteropServices; + +namespace Ionic.Zlib +{ + + /// + /// The compression level to be used when using a DeflateStream or ZlibStream with CompressionMode.Compress. + /// + public enum CompressionLevel + { + /// + /// None means that the data will be simply stored, with no change at all. + /// If you are producing ZIPs for use on Mac OSX, be aware that archives produced with CompressionLevel.None + /// cannot be opened with the default zip reader. Use a different CompressionLevel. + /// + None= 0, + /// + /// Same as None. + /// + Level0 = 0, + + /// + /// The fastest but least effective compression. + /// + BestSpeed = 1, + + /// + /// A synonym for BestSpeed. + /// + Level1 = 1, + + /// + /// A little slower, but better, than level 1. + /// + Level2 = 2, + + /// + /// A little slower, but better, than level 2. + /// + Level3 = 3, + + /// + /// A little slower, but better, than level 3. + /// + Level4 = 4, + + /// + /// A little slower than level 4, but with better compression. + /// + Level5 = 5, + + /// + /// The default compression level, with a good balance of speed and compression efficiency. + /// + Default = 6, + /// + /// A synonym for Default. + /// + Level6 = 6, + + /// + /// Pretty good compression! + /// + Level7 = 7, + + /// + /// Better compression than Level7! + /// + Level8 = 8, + + /// + /// The "best" compression, where best means greatest reduction in size of the input data stream. + /// This is also the slowest compression. + /// + BestCompression = 9, + + /// + /// A synonym for BestCompression. + /// + Level9 = 9, + } + + /// + /// Describes options for how the compression algorithm is executed. Different strategies + /// work better on different sorts of data. 
The strategy parameter can affect the compression + /// ratio and the speed of compression but not the correctness of the compresssion. + /// + public enum CompressionStrategy + { + /// + /// The default strategy is probably the best for normal data. + /// + Default = 0, + + /// + /// The Filtered strategy is intended to be used most effectively with data produced by a + /// filter or predictor. By this definition, filtered data consists mostly of small + /// values with a somewhat random distribution. In this case, the compression algorithm + /// is tuned to compress them better. The effect of Filtered is to force more Huffman + /// coding and less string matching; it is a half-step between Default and HuffmanOnly. + /// + Filtered = 1, + + /// + /// Using HuffmanOnly will force the compressor to do Huffman encoding only, with no + /// string matching. + /// + HuffmanOnly = 2, + } + + + /// + /// An enum to specify the direction of transcoding - whether to compress or decompress. + /// + public enum CompressionMode + { + /// + /// Used to specify that the stream should compress the data. + /// + Compress= 0, + /// + /// Used to specify that the stream should decompress the data. + /// + Decompress = 1, + } + + + /// + /// A general purpose exception class for exceptions in the Zlib library. + /// + [Interop.GuidAttribute("ebc25cf6-9120-4283-b972-0e5520d0000E")] + public class ZlibException : System.Exception + { + /// + /// The ZlibException class captures exception information generated + /// by the Zlib library. + /// + public ZlibException() + : base() + { + } + + /// + /// This ctor collects a message attached to the exception. + /// + /// + public ZlibException(System.String s) + : base(s) + { + } + } + + + internal class SharedUtils + { + /// + /// Performs an unsigned bitwise right shift with the specified number + /// + /// Number to operate on + /// Ammount of bits to shift + /// The resulting number from the shift operation + public static int URShift(int number, int bits) + { + return (int)((uint)number >> bits); + } + +#if NOT + /// + /// Performs an unsigned bitwise right shift with the specified number + /// + /// Number to operate on + /// Ammount of bits to shift + /// The resulting number from the shift operation + public static long URShift(long number, int bits) + { + return (long) ((UInt64)number >> bits); + } +#endif + + /// + /// Reads a number of characters from the current source TextReader and writes + /// the data to the target array at the specified index. + /// + /// + /// The source TextReader to read from + /// Contains the array of characteres read from the source TextReader. + /// The starting index of the target array. + /// The maximum number of characters to read from the source TextReader. + /// + /// + /// The number of characters read. The number will be less than or equal to + /// count depending on the data available in the source TextReader. Returns -1 + /// if the end of the stream is reached. 
+ /// + public static System.Int32 ReadInput(System.IO.TextReader sourceTextReader, byte[] target, int start, int count) + { + // Returns 0 bytes if not enough space in target + if (target.Length == 0) return 0; + + char[] charArray = new char[target.Length]; + int bytesRead = sourceTextReader.Read(charArray, start, count); + + // Returns -1 if EOF + if (bytesRead == 0) return -1; + + for (int index = start; index < start + bytesRead; index++) + target[index] = (byte)charArray[index]; + + return bytesRead; + } + + + internal static byte[] ToByteArray(System.String sourceString) + { + return System.Text.UTF8Encoding.UTF8.GetBytes(sourceString); + } + + + internal static char[] ToCharArray(byte[] byteArray) + { + return System.Text.UTF8Encoding.UTF8.GetChars(byteArray); + } + } + + internal static class InternalConstants + { + internal static readonly int MAX_BITS = 15; + internal static readonly int BL_CODES = 19; + internal static readonly int D_CODES = 30; + internal static readonly int LITERALS = 256; + internal static readonly int LENGTH_CODES = 29; + internal static readonly int L_CODES = (LITERALS + 1 + LENGTH_CODES); + + // Bit length codes must not exceed MAX_BL_BITS bits + internal static readonly int MAX_BL_BITS = 7; + + // repeat previous bit length 3-6 times (2 bits of repeat count) + internal static readonly int REP_3_6 = 16; + + // repeat a zero length 3-10 times (3 bits of repeat count) + internal static readonly int REPZ_3_10 = 17; + + // repeat a zero length 11-138 times (7 bits of repeat count) + internal static readonly int REPZ_11_138 = 18; + + } + + internal sealed class StaticTree + { + internal static readonly short[] lengthAndLiteralsTreeCodes = new short[] { + 12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, 8, 108, 8, 236, 8, + 28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, 8, 124, 8, 252, 8, + 2, 8, 130, 8, 66, 8, 194, 8, 34, 8, 162, 8, 98, 8, 226, 8, + 18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, 8, 114, 8, 242, 8, + 10, 8, 138, 8, 74, 8, 202, 8, 42, 8, 170, 8, 106, 8, 234, 8, + 26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, 8, 122, 8, 250, 8, + 6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8, 102, 8, 230, 8, + 22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, 8, 118, 8, 246, 8, + 14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, 8, 110, 8, 238, 8, + 30, 8, 158, 8, 94, 8, 222, 8, 62, 8, 190, 8, 126, 8, 254, 8, + 1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8, 97, 8, 225, 8, + 17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, 8, 113, 8, 241, 8, + 9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8, 105, 8, 233, 8, + 25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, 8, 121, 8, 249, 8, + 5, 8, 133, 8, 69, 8, 197, 8, 37, 8, 165, 8, 101, 8, 229, 8, + 21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, 8, 117, 8, 245, 8, + 13, 8, 141, 8, 77, 8, 205, 8, 45, 8, 173, 8, 109, 8, 237, 8, + 29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, 8, 125, 8, 253, 8, + 19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, 9, 211, 9, 467, 9, + 51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371, 9, 243, 9, 499, 9, + 11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, 9, 203, 9, 459, 9, + 43, 9, 299, 9, 171, 9, 427, 9, 107, 9, 363, 9, 235, 9, 491, 9, + 27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, 9, 219, 9, 475, 9, + 59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379, 9, 251, 9, 507, 9, + 7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, 9, 199, 9, 455, 9, + 39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359, 9, 231, 9, 487, 9, + 23, 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, 9, 215, 9, 471, 9, + 55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375, 9, 247, 9, 503, 9, + 15, 9, 271, 9, 143, 9, 399, 9, 79, 9, 335, 9, 207, 9, 463, 9, + 47, 9, 
303, 9, 175, 9, 431, 9, 111, 9, 367, 9, 239, 9, 495, 9, + 31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, 9, 223, 9, 479, 9, + 63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383, 9, 255, 9, 511, 9, + 0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, 48, 7, 112, 7, + 8, 7, 72, 7, 40, 7, 104, 7, 24, 7, 88, 7, 56, 7, 120, 7, + 4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, 52, 7, 116, 7, + 3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8, 99, 8, 227, 8 + }; + + internal static readonly short[] distTreeCodes = new short[] { + 0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5, + 2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5, + 1, 5, 17, 5, 9, 5, 25, 5, 5, 5, 21, 5, 13, 5, 29, 5, + 3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 }; + + internal static readonly StaticTree Literals; + internal static readonly StaticTree Distances; + internal static readonly StaticTree BitLengths; + + internal short[] treeCodes; // static tree or null + internal int[] extraBits; // extra bits for each code or null + internal int extraBase; // base index for extra_bits + internal int elems; // max number of elements in the tree + internal int maxLength; // max bit length for the codes + + private StaticTree(short[] treeCodes, int[] extraBits, int extraBase, int elems, int maxLength) + { + this.treeCodes = treeCodes; + this.extraBits = extraBits; + this.extraBase = extraBase; + this.elems = elems; + this.maxLength = maxLength; + } + static StaticTree() + { + Literals = new StaticTree(lengthAndLiteralsTreeCodes, Tree.ExtraLengthBits, InternalConstants.LITERALS + 1, InternalConstants.L_CODES, InternalConstants.MAX_BITS); + Distances = new StaticTree(distTreeCodes, Tree.ExtraDistanceBits, 0, InternalConstants.D_CODES, InternalConstants.MAX_BITS); + BitLengths = new StaticTree(null, Tree.extra_blbits, 0, InternalConstants.BL_CODES, InternalConstants.MAX_BL_BITS); + } + } + + + + /// + /// Computes an Adler-32 checksum. + /// + /// + /// The Adler checksum is similar to a CRC checksum, but faster to compute, though less + /// reliable. It is used in producing RFC1950 compressed streams. The Adler checksum + /// is a required part of the "ZLIB" standard. Applications will almost never need to + /// use this class directly. + /// + internal sealed class Adler + { + // largest prime smaller than 65536 + private static readonly uint BASE = 65521; + // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 + private static readonly int NMAX = 5552; + + static internal uint Adler32(uint adler, byte[] buf, int index, int len) + { + if (buf == null) + return 1; + + uint s1 = (uint) (adler & 0xffff); + uint s2 = (uint) ((adler >> 16) & 0xffff); + + while (len > 0) + { + int k = len < NMAX ? 
len : NMAX; + len -= k; + while (k >= 16) + { + //s1 += (buf[index++] & 0xff); s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + s1 += buf[index++]; s2 += s1; + k -= 16; + } + if (k != 0) + { + do + { + s1 += buf[index++]; + s2 += s1; + } + while (--k != 0); + } + s1 %= BASE; + s2 %= BASE; + } + return (uint)((s2 << 16) | s1); + } + + } + +} \ No newline at end of file diff --git a/NBToolkit/NBToolkit/Zlib/ZlibBaseStream.cs b/NBToolkit/NBToolkit/Zlib/ZlibBaseStream.cs new file mode 100644 index 0000000..43fd5b3 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/ZlibBaseStream.cs @@ -0,0 +1,627 @@ +// ZlibBaseStream.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-October-28 15:45:15> +// +// ------------------------------------------------------------------ +// +// This module defines the ZlibBaseStream class, which is an intnernal +// base class for DeflateStream, ZlibStream and GZipStream. +// +// ------------------------------------------------------------------ + +using System; +using System.IO; + +namespace Ionic.Zlib +{ + + internal enum ZlibStreamFlavor { ZLIB = 1950, DEFLATE = 1951, GZIP = 1952 } + + internal class ZlibBaseStream : System.IO.Stream + { + protected internal ZlibCodec _z = null; // deferred init... 
new ZlibCodec(); + + protected internal StreamMode _streamMode = StreamMode.Undefined; + protected internal FlushType _flushMode; + protected internal ZlibStreamFlavor _flavor; + protected internal CompressionMode _compressionMode; + protected internal CompressionLevel _level; + protected internal bool _leaveOpen; + protected internal byte[] _workingBuffer; + protected internal int _bufferSize = ZlibConstants.WorkingBufferSizeDefault; + protected internal byte[] _buf1 = new byte[1]; + + protected internal System.IO.Stream _stream; + protected internal CompressionStrategy Strategy = CompressionStrategy.Default; + + // workitem 7159 + Ionic.Zlib.CRC32 crc; + protected internal string _GzipFileName; + protected internal string _GzipComment; + protected internal DateTime _GzipMtime; + protected internal int _gzipHeaderByteCount; + + internal int Crc32 { get { if (crc == null) return 0; return crc.Crc32Result; } } + + public ZlibBaseStream(System.IO.Stream stream, + CompressionMode compressionMode, + CompressionLevel level, + ZlibStreamFlavor flavor, + bool leaveOpen) + : base() + { + this._flushMode = FlushType.None; + //this._workingBuffer = new byte[WORKING_BUFFER_SIZE_DEFAULT]; + this._stream = stream; + this._leaveOpen = leaveOpen; + this._compressionMode = compressionMode; + this._flavor = flavor; + this._level = level; + // workitem 7159 + if (flavor == ZlibStreamFlavor.GZIP) + { + crc = new CRC32(); + } + } + + + protected internal bool _wantCompress + { + get + { + return (this._compressionMode == CompressionMode.Compress); + } + } + + private ZlibCodec z + { + get + { + if (_z == null) + { + bool wantRfc1950Header = (this._flavor == ZlibStreamFlavor.ZLIB); + _z = new ZlibCodec(); + if (this._compressionMode == CompressionMode.Decompress) + { + _z.InitializeInflate(wantRfc1950Header); + } + else + { + _z.Strategy = Strategy; + _z.InitializeDeflate(this._level, wantRfc1950Header); + } + } + return _z; + } + } + + + + private byte[] workingBuffer + { + get + { + if (_workingBuffer == null) + _workingBuffer = new byte[_bufferSize]; + return _workingBuffer; + } + } + + + + public override void Write(System.Byte[] buffer, int offset, int count) + { + // workitem 7159 + // calculate the CRC on the unccompressed data (before writing) + if (crc != null) + crc.SlurpBlock(buffer, offset, count); + + if (_streamMode == StreamMode.Undefined) + _streamMode = StreamMode.Writer; + else if (_streamMode != StreamMode.Writer) + throw new ZlibException("Cannot Write after Reading."); + + if (count == 0) + return; + + // first reference of z property will initialize the private var _z + z.InputBuffer = buffer; + _z.NextIn = offset; + _z.AvailableBytesIn = count; + bool done = false; + do + { + _z.OutputBuffer = workingBuffer; + _z.NextOut = 0; + _z.AvailableBytesOut = _workingBuffer.Length; + int rc = (_wantCompress) + ? _z.Deflate(_flushMode) + : _z.Inflate(_flushMode); + if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + throw new ZlibException((_wantCompress ? "de" : "in") + "flating: " + _z.Message); + + //if (_workingBuffer.Length - _z.AvailableBytesOut > 0) + _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut); + + done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0; + + // If GZIP and de-compress, we're done when 8 bytes remain. 
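+ // (Those 8 bytes are the GZIP trailer: a 4-byte CRC32 followed by a 4-byte
+ // ISIZE, the uncompressed length mod 2^32, which the inflater itself never
+ // consumes.)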
+ if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress) + done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0); + + } + while (!done); + } + + + + private void finish() + { + if (_z == null) return; + + if (_streamMode == StreamMode.Writer) + { + bool done = false; + do + { + _z.OutputBuffer = workingBuffer; + _z.NextOut = 0; + _z.AvailableBytesOut = _workingBuffer.Length; + int rc = (_wantCompress) + ? _z.Deflate(FlushType.Finish) + : _z.Inflate(FlushType.Finish); + + if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) + { + string verb = (_wantCompress ? "de" : "in") + "flating"; + if (_z.Message == null) + throw new ZlibException(String.Format("{0}: (rc = {1})", verb, rc)); + else + throw new ZlibException(verb + ": " + _z.Message); + } + + if (_workingBuffer.Length - _z.AvailableBytesOut > 0) + { + _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut); + } + + done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0; + // If GZIP and de-compress, we're done when 8 bytes remain. + if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress) + done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0); + + } + while (!done); + + Flush(); + + // workitem 7159 + if (_flavor == ZlibStreamFlavor.GZIP) + { + if (_wantCompress) + { + // Emit the GZIP trailer: CRC32 and size mod 2^32 + int c1 = crc.Crc32Result; + _stream.Write(BitConverter.GetBytes(c1), 0, 4); + int c2 = (Int32)(crc.TotalBytesRead & 0x00000000FFFFFFFF); + _stream.Write(BitConverter.GetBytes(c2), 0, 4); + } + else + { + throw new ZlibException("Writing with decompression is not supported."); + } + } + } + // workitem 7159 + else if (_streamMode == StreamMode.Reader) + { + if (_flavor == ZlibStreamFlavor.GZIP) + { + if (!_wantCompress) + { + // workitem 8501: handle edge case (decompress empty stream) + if (_z.TotalBytesOut == 0L) + return; + + // Read and potentially verify the GZIP trailer: CRC32 and size mod 2^32 + byte[] trailer = new byte[8]; + + // workitem 8679 + if (_z.AvailableBytesIn != 8) + { + // Make sure we have read to the end of the stream + Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, _z.AvailableBytesIn); + int bytesNeeded = 8 - _z.AvailableBytesIn; + int bytesRead = _stream.Read(trailer, + _z.AvailableBytesIn, + bytesNeeded); + if (bytesNeeded != bytesRead) + { + throw new ZlibException(String.Format("Protocol error. AvailableBytesIn={0}, expected 8", + _z.AvailableBytesIn + bytesRead)); + } + } + else + { + Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, trailer.Length); + } + + + Int32 crc32_expected = BitConverter.ToInt32(trailer, 0); + Int32 crc32_actual = crc.Crc32Result; + Int32 isize_expected = BitConverter.ToInt32(trailer, 4); + Int32 isize_actual = (Int32)(_z.TotalBytesOut & 0x00000000FFFFFFFF); + + if (crc32_actual != crc32_expected) + throw new ZlibException(String.Format("Bad CRC32 in GZIP stream. (actual({0:X8})!=expected({1:X8}))", crc32_actual, crc32_expected)); + + if (isize_actual != isize_expected) + throw new ZlibException(String.Format("Bad size in GZIP stream. 
(actual({0})!=expected({1}))", isize_actual, isize_expected)); + + } + else + { + throw new ZlibException("Reading with compression is not supported."); + } + } + } + } + + + private void end() + { + if (z == null) + return; + if (_wantCompress) + { + _z.EndDeflate(); + } + else + { + _z.EndInflate(); + } + _z = null; + } + + + public override void Close() + { + if (_stream == null) return; + try + { + finish(); + } + finally + { + end(); + if (!_leaveOpen) _stream.Close(); + _stream = null; + } + } + + public override void Flush() + { + _stream.Flush(); + } + + public override System.Int64 Seek(System.Int64 offset, System.IO.SeekOrigin origin) + { + throw new NotImplementedException(); + //_outStream.Seek(offset, origin); + } + public override void SetLength(System.Int64 value) + { + _stream.SetLength(value); + } + + +#if NOT + public int Read() + { + if (Read(_buf1, 0, 1) == 0) + return 0; + // calculate CRC after reading + if (crc!=null) + crc.SlurpBlock(_buf1,0,1); + return (_buf1[0] & 0xFF); + } +#endif + + private bool nomoreinput = false; + + + + private string ReadZeroTerminatedString() + { + var list = new System.Collections.Generic.List(); + bool done = false; + do + { + // workitem 7740 + int n = _stream.Read(_buf1, 0, 1); + if (n != 1) + throw new ZlibException("Unexpected EOF reading GZIP header."); + else + { + if (_buf1[0] == 0) + done = true; + else + list.Add(_buf1[0]); + } + } while (!done); + byte[] a = list.ToArray(); + return GZipStream.iso8859dash1.GetString(a, 0, a.Length); + } + + + private int _ReadAndValidateGzipHeader() + { + int totalBytesRead = 0; + // read the header on the first read + byte[] header = new byte[10]; + int n = _stream.Read(header, 0, header.Length); + + // workitem 8501: handle edge case (decompress empty stream) + if (n == 0) + return 0; + + if (n != 10) + throw new ZlibException("Not a valid GZIP stream."); + + if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8) + throw new ZlibException("Bad GZIP header."); + + Int32 timet = BitConverter.ToInt32(header, 4); + _GzipMtime = GZipStream._unixEpoch.AddSeconds(timet); + totalBytesRead += n; + if ((header[3] & 0x04) == 0x04) + { + // read and discard extra field + n = _stream.Read(header, 0, 2); // 2-byte length field + totalBytesRead += n; + + Int16 extraLength = (Int16)(header[0] + header[1] * 256); + byte[] extra = new byte[extraLength]; + n = _stream.Read(extra, 0, extra.Length); + if (n != extraLength) + throw new ZlibException("Unexpected end-of-file reading GZIP header."); + totalBytesRead += n; + } + if ((header[3] & 0x08) == 0x08) + _GzipFileName = ReadZeroTerminatedString(); + if ((header[3] & 0x10) == 0x010) + _GzipComment = ReadZeroTerminatedString(); + if ((header[3] & 0x02) == 0x02) + Read(_buf1, 0, 1); // CRC16, ignore + + return totalBytesRead; + } + + + + public override System.Int32 Read(System.Byte[] buffer, System.Int32 offset, System.Int32 count) + { + // According to MS documentation, any implementation of the IO.Stream.Read function must: + // (a) throw an exception if offset & count reference an invalid part of the buffer, + // or if count < 0, or if buffer is null + // (b) return 0 only upon EOF, or if count = 0 + // (c) if not EOF, then return at least 1 byte, up to bytes + + if (_streamMode == StreamMode.Undefined) + { + if (!this._stream.CanRead) throw new ZlibException("The stream is not readable."); + // for the first read, set up some controls. 
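+ // (The first read is also where a GZIP stream's header is consumed:
+ // the fixed 10-byte header plus any optional extra field, file name,
+ // comment and header CRC, via _ReadAndValidateGzipHeader above.)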
+ _streamMode = StreamMode.Reader; + // (The first reference to _z goes through the private accessor which + // may initialize it.) + z.AvailableBytesIn = 0; + if (_flavor == ZlibStreamFlavor.GZIP) + { + _gzipHeaderByteCount = _ReadAndValidateGzipHeader(); + // workitem 8501: handle edge case (decompress empty stream) + if (_gzipHeaderByteCount == 0) + return 0; + } + } + + if (_streamMode != StreamMode.Reader) + throw new ZlibException("Cannot Read after Writing."); + + if (count == 0) return 0; + if (nomoreinput && _wantCompress) return 0; // workitem 8557 + if (buffer == null) throw new ArgumentNullException("buffer"); + if (count < 0) throw new ArgumentOutOfRangeException("count"); + if (offset < buffer.GetLowerBound(0)) throw new ArgumentOutOfRangeException("offset"); + if ((offset + count) > buffer.GetLength(0)) throw new ArgumentOutOfRangeException("count"); + + int rc = 0; + + // set up the output of the deflate/inflate codec: + _z.OutputBuffer = buffer; + _z.NextOut = offset; + _z.AvailableBytesOut = count; + + // This is necessary in case _workingBuffer has been resized. (new byte[]) + // (The first reference to _workingBuffer goes through the private accessor which + // may initialize it.) + _z.InputBuffer = workingBuffer; + + do + { + // need data in _workingBuffer in order to deflate/inflate. Here, we check if we have any. + if ((_z.AvailableBytesIn == 0) && (!nomoreinput)) + { + // No data available, so try to Read data from the captive stream. + _z.NextIn = 0; + _z.AvailableBytesIn = _stream.Read(_workingBuffer, 0, _workingBuffer.Length); + if (_z.AvailableBytesIn == 0) + nomoreinput = true; + + } + // we have data in InputBuffer; now compress or decompress as appropriate + rc = (_wantCompress) + ? _z.Deflate(_flushMode) + : _z.Inflate(_flushMode); + + if (nomoreinput && (rc == ZlibConstants.Z_BUF_ERROR)) + return 0; + + if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + throw new ZlibException(String.Format("{0}flating: rc={1} msg={2}", (_wantCompress ? "de" : "in"), rc, _z.Message)); + + if ((nomoreinput || rc == ZlibConstants.Z_STREAM_END) && (_z.AvailableBytesOut == count)) + break; // nothing more to read + } + //while (_z.AvailableBytesOut == count && rc == ZlibConstants.Z_OK); + while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK); + + + // workitem 8557 + // is there more room in output? + if (_z.AvailableBytesOut > 0) + { + if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0) + { + // deferred + } + + // are we completely done reading? + if (nomoreinput) + { + // and in compression? 
+ if (_wantCompress) + { + // no more input data available; therefore we flush to + // try to complete the read + rc = _z.Deflate(FlushType.Finish); + + if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + throw new ZlibException(String.Format("Deflating: rc={0} msg={1}", rc, _z.Message)); + } + } + } + + + rc = (count - _z.AvailableBytesOut); + + // calculate CRC after reading + if (crc != null) + crc.SlurpBlock(buffer, offset, rc); + + return rc; + } + + + + public override System.Boolean CanRead + { + get { return this._stream.CanRead; } + } + + public override System.Boolean CanSeek + { + get { return this._stream.CanSeek; } + } + + public override System.Boolean CanWrite + { + get { return this._stream.CanWrite; } + } + + public override System.Int64 Length + { + get { return _stream.Length; } + } + + public override long Position + { + get { throw new NotImplementedException(); } + set { throw new NotImplementedException(); } + } + + internal enum StreamMode + { + Writer, + Reader, + Undefined, + } + + + public static void CompressString(String s, Stream compressor) + { + byte[] uncompressed = System.Text.Encoding.UTF8.GetBytes(s); + using (compressor) + { + compressor.Write(uncompressed, 0, uncompressed.Length); + } + } + + public static void CompressBuffer(byte[] b, Stream compressor) + { + // workitem 8460 + using (compressor) + { + compressor.Write(b, 0, b.Length); + } + } + + public static String UncompressString(byte[] compressed, Stream decompressor) + { + // workitem 8460 + byte[] working = new byte[1024]; + var encoding = System.Text.Encoding.UTF8; + using (var output = new MemoryStream()) + { + using (decompressor) + { + int n; + while ((n = decompressor.Read(working, 0, working.Length)) != 0) + { + output.Write(working, 0, n); + } + } + + // reset to allow read from start + output.Seek(0, SeekOrigin.Begin); + var sr = new StreamReader(output, encoding); + return sr.ReadToEnd(); + } + } + + public static byte[] UncompressBuffer(byte[] compressed, Stream decompressor) + { + // workitem 8460 + byte[] working = new byte[1024]; + using (var output = new MemoryStream()) + { + using (decompressor) + { + int n; + while ((n = decompressor.Read(working, 0, working.Length)) != 0) + { + output.Write(working, 0, n); + } + } + return output.ToArray(); + } + } + + } + + +} diff --git a/NBToolkit/NBToolkit/Zlib/ZlibCodec.cs b/NBToolkit/NBToolkit/Zlib/ZlibCodec.cs new file mode 100644 index 0000000..813d248 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/ZlibCodec.cs @@ -0,0 +1,717 @@ +// ZlibCodec.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-November-03 15:40:51> +// +// ------------------------------------------------------------------ +// +// This module defines a Codec for ZLIB compression and +// decompression. This code extends code that was based the jzlib +// implementation of zlib, but this code is completely novel. 
The codec +// class is new, and encapsulates some behaviors that are new, and some +// that were present in other classes in the jzlib code base. In +// keeping with the license for jzlib, the copyright to the jzlib code +// is included below. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + + +using System; +using Interop=System.Runtime.InteropServices; + +namespace Ionic.Zlib +{ + /// + /// Encoder and Decoder for ZLIB and DEFLATE (IETF RFC1950 and RFC1951). + /// + /// + /// + /// This class compresses and decompresses data according to the Deflate algorithm + /// and optionally, the ZLIB format, as documented in RFC 1950 - ZLIB and RFC 1951 - DEFLATE. + /// + [Interop.GuidAttribute("ebc25cf6-9120-4283-b972-0e5520d0000D")] + [Interop.ComVisible(true)] +#if !NETCF + [Interop.ClassInterface(Interop.ClassInterfaceType.AutoDispatch)] +#endif + sealed public class ZlibCodec + { + /// + /// The buffer from which data is taken. + /// + public byte[] InputBuffer; + + /// + /// An index into the InputBuffer array, indicating where to start reading. + /// + public int NextIn; + + /// + /// The number of bytes available in the InputBuffer, starting at NextIn. + /// + /// + /// Generally you should set this to InputBuffer.Length before the first Inflate() or Deflate() call. + /// The class will update this number as calls to Inflate/Deflate are made. + /// + public int AvailableBytesIn; + + /// + /// Total number of bytes read so far, through all calls to Inflate()/Deflate(). + /// + public long TotalBytesIn; + + /// + /// Buffer to store output data. 
+ /// + public byte[] OutputBuffer; + + /// + /// An index into the OutputBuffer array, indicating where to start writing. + /// + public int NextOut; + + /// + /// The number of bytes available in the OutputBuffer, starting at NextOut. + /// + /// + /// Generally you should set this to OutputBuffer.Length before the first Inflate() or Deflate() call. + /// The class will update this number as calls to Inflate/Deflate are made. + /// + public int AvailableBytesOut; + + /// + /// Total number of bytes written to the output so far, through all calls to Inflate()/Deflate(). + /// + public long TotalBytesOut; + + /// + /// used for diagnostics, when something goes wrong! + /// + public System.String Message; + + internal DeflateManager dstate; + internal InflateManager istate; + + internal uint _Adler32; + + /// + /// The compression level to use in this codec. Useful only in compression mode. + /// + public CompressionLevel CompressLevel = CompressionLevel.Default; + + /// + /// The number of Window Bits to use. + /// + /// + /// This gauges the size of the sliding window, and hence the + /// compression effectiveness as well as memory consumption. It's best to just leave this + /// setting alone if you don't know what it is. The maximum value is 15 bits, which implies + /// a 32k window. + /// + public int WindowBits = ZlibConstants.WindowBitsDefault; + + /// + /// The compression strategy to use. + /// + /// + /// This is only effective in compression. The theory offered by ZLIB is that different + /// strategies could potentially produce significant differences in compression behavior + /// for different data sets. Unfortunately I don't have any good recommendations for how + /// to set it differently. When I tested changing the strategy I got minimally different + /// compression performance. It's best to leave this property alone if you don't have a + /// good feel for it. Or, you may want to produce a test harness that runs through the + /// different strategy options and evaluates them on different file types. If you do that, + /// let me know your results. + /// + public CompressionStrategy Strategy = CompressionStrategy.Default; + + + /// + /// The Adler32 checksum on the data transferred through the codec so far. You probably don't need to look at this. + /// + public int Adler32 { get { return (int)_Adler32; } } + + + /// + /// Create a ZlibCodec. + /// + /// + /// If you use this default constructor, you will later have to explicitly call + /// InitializeInflate() or InitializeDeflate() before using the ZlibCodec to compress + /// or decompress. + /// + public ZlibCodec() { } + + /// + /// Create a ZlibCodec that either compresses or decompresses. + /// + /// + /// Indicates whether the codec should compress (deflate) or decompress (inflate). + /// + public ZlibCodec(CompressionMode mode) + { + if (mode == CompressionMode.Compress) + { + int rc = InitializeDeflate(); + if (rc != ZlibConstants.Z_OK) throw new ZlibException("Cannot initialize for deflate."); + } + else if (mode == CompressionMode.Decompress) + { + int rc = InitializeInflate(); + if (rc != ZlibConstants.Z_OK) throw new ZlibException("Cannot initialize for inflate."); + } + else throw new ZlibException("Invalid ZlibStreamFlavor."); + } + + /// + /// Initialize the inflation state. + /// + /// + /// It is not necessary to call this before using the ZlibCodec to inflate data; + /// It is implicitly called when you call the constructor. + /// + /// Z_OK if everything goes well. 
+ public int InitializeInflate() + { + return InitializeInflate(this.WindowBits); + } + + /// + /// Initialize the inflation state with an explicit flag to + /// govern the handling of RFC1950 header bytes. + /// + /// + /// + /// By default, the ZLIB header defined in RFC 1950 is expected. If + /// you want to read a zlib stream you should specify true for + /// expectRfc1950Header. If you have a deflate stream, you will want to specify + /// false. It is only necessary to invoke this initializer explicitly if you + /// want to specify false. + /// + /// + /// whether to expect an RFC1950 header byte + /// pair when reading the stream of data to be inflated. + /// + /// Z_OK if everything goes well. + public int InitializeInflate(bool expectRfc1950Header) + { + return InitializeInflate(this.WindowBits, expectRfc1950Header); + } + + /// + /// Initialize the ZlibCodec for inflation, with the specified number of window bits. + /// + /// The number of window bits to use. If you need to ask what that is, + /// then you shouldn't be calling this initializer. + /// Z_OK if all goes well. + public int InitializeInflate(int windowBits) + { + this.WindowBits = windowBits; + return InitializeInflate(windowBits, true); + } + + /// + /// Initialize the inflation state with an explicit flag to govern the handling of + /// RFC1950 header bytes. + /// + /// + /// + /// If you want to read a zlib stream you should specify true for + /// expectRfc1950Header. In this case, the library will expect to find a ZLIB + /// header, as defined in RFC + /// 1950, in the compressed stream. If you will be reading a DEFLATE or + /// GZIP stream, which does not have such a header, you will want to specify + /// false. + /// + /// + /// whether to expect an RFC1950 header byte pair when reading + /// the stream of data to be inflated. + /// The number of window bits to use. If you need to ask what that is, + /// then you shouldn't be calling this initializer. + /// Z_OK if everything goes well. + public int InitializeInflate(int windowBits, bool expectRfc1950Header) + { + this.WindowBits = windowBits; + if (dstate != null) throw new ZlibException("You may not call InitializeInflate() after calling InitializeDeflate()."); + istate = new InflateManager(expectRfc1950Header); + return istate.Initialize(this, windowBits); + } + + /// + /// Inflate the data in the InputBuffer, placing the result in the OutputBuffer. + /// + /// + /// You must have set InputBuffer and OutputBuffer, NextIn and NextOut, and AvailableBytesIn and + /// AvailableBytesOut before calling this method. 
+ /// + /// + /// + /// private void InflateBuffer() + /// { + /// int bufferSize = 1024; + /// byte[] buffer = new byte[bufferSize]; + /// ZlibCodec decompressor = new ZlibCodec(); + /// + /// Console.WriteLine("\n============================================"); + /// Console.WriteLine("Size of Buffer to Inflate: {0} bytes.", CompressedBytes.Length); + /// MemoryStream ms = new MemoryStream(DecompressedBytes); + /// + /// int rc = decompressor.InitializeInflate(); + /// + /// decompressor.InputBuffer = CompressedBytes; + /// decompressor.NextIn = 0; + /// decompressor.AvailableBytesIn = CompressedBytes.Length; + /// + /// decompressor.OutputBuffer = buffer; + /// + /// // pass 1: inflate + /// do + /// { + /// decompressor.NextOut = 0; + /// decompressor.AvailableBytesOut = buffer.Length; + /// rc = decompressor.Inflate(FlushType.None); + /// + /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + /// throw new Exception("inflating: " + decompressor.Message); + /// + /// ms.Write(decompressor.OutputBuffer, 0, buffer.Length - decompressor.AvailableBytesOut); + /// } + /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0); + /// + /// // pass 2: finish and flush + /// do + /// { + /// decompressor.NextOut = 0; + /// decompressor.AvailableBytesOut = buffer.Length; + /// rc = decompressor.Inflate(FlushType.Finish); + /// + /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) + /// throw new Exception("inflating: " + decompressor.Message); + /// + /// if (buffer.Length - decompressor.AvailableBytesOut > 0) + /// ms.Write(buffer, 0, buffer.Length - decompressor.AvailableBytesOut); + /// } + /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0); + /// + /// decompressor.EndInflate(); + /// } + /// + /// + /// + /// The flush to use when inflating. + /// Z_OK if everything goes well. + public int Inflate(FlushType flush) + { + if (istate == null) + throw new ZlibException("No Inflate State!"); + return istate.Inflate(flush); + } + + + /// + /// Ends an inflation session. + /// + /// + /// Call this after successively calling Inflate(). This will cause all buffers to be flushed. + /// After calling this you cannot call Inflate() without a intervening call to one of the + /// InitializeInflate() overloads. + /// + /// Z_OK if everything goes well. + public int EndInflate() + { + if (istate == null) + throw new ZlibException("No Inflate State!"); + int ret = istate.End(); + istate = null; + return ret; + } + + /// + /// I don't know what this does! + /// + /// Z_OK if everything goes well. + public int SyncInflate() + { + if (istate == null) + throw new ZlibException("No Inflate State!"); + return istate.Sync(); + } + + /// + /// Initialize the ZlibCodec for deflation operation. + /// + /// + /// The codec will use the MAX window bits and the default level of compression. 
+ /// + /// + /// + /// int bufferSize = 40000; + /// byte[] CompressedBytes = new byte[bufferSize]; + /// byte[] DecompressedBytes = new byte[bufferSize]; + /// + /// ZlibCodec compressor = new ZlibCodec(); + /// + /// compressor.InitializeDeflate(CompressionLevel.Default); + /// + /// compressor.InputBuffer = System.Text.ASCIIEncoding.ASCII.GetBytes(TextToCompress); + /// compressor.NextIn = 0; + /// compressor.AvailableBytesIn = compressor.InputBuffer.Length; + /// + /// compressor.OutputBuffer = CompressedBytes; + /// compressor.NextOut = 0; + /// compressor.AvailableBytesOut = CompressedBytes.Length; + /// + /// while (compressor.TotalBytesIn != TextToCompress.Length && compressor.TotalBytesOut < bufferSize) + /// { + /// compressor.Deflate(FlushType.None); + /// } + /// + /// while (true) + /// { + /// int rc= compressor.Deflate(FlushType.Finish); + /// if (rc == ZlibConstants.Z_STREAM_END) break; + /// } + /// + /// compressor.EndDeflate(); + /// + /// + /// + /// Z_OK if all goes well. You generally don't need to check the return code. + public int InitializeDeflate() + { + return _InternalInitializeDeflate(true); + } + + /// + /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel. + /// + /// + /// The codec will use the maximum window bits (15) and the specified + /// CompressionLevel. It will emit a ZLIB stream as it compresses. + /// + /// The compression level for the codec. + /// Z_OK if all goes well. + public int InitializeDeflate(CompressionLevel level) + { + this.CompressLevel = level; + return _InternalInitializeDeflate(true); + } + + + /// + /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel, + /// and the explicit flag governing whether to emit an RFC1950 header byte pair. + /// + /// + /// The codec will use the maximum window bits (15) and the specified CompressionLevel. + /// If you want to generate a zlib stream, you should specify true for + /// wantRfc1950Header. In this case, the library will emit a ZLIB + /// header, as defined in RFC + /// 1950, in the compressed stream. + /// + /// The compression level for the codec. + /// whether to emit an initial RFC1950 byte pair in the compressed stream. + /// Z_OK if all goes well. + public int InitializeDeflate(CompressionLevel level, bool wantRfc1950Header) + { + this.CompressLevel = level; + return _InternalInitializeDeflate(wantRfc1950Header); + } + + + /// + /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel, + /// and the specified number of window bits. + /// + /// + /// The codec will use the specified number of window bits and the specified CompressionLevel. + /// + /// The compression level for the codec. + /// the number of window bits to use. If you don't know what this means, don't use this method. + /// Z_OK if all goes well. + public int InitializeDeflate(CompressionLevel level, int bits) + { + this.CompressLevel = level; + this.WindowBits = bits; + return _InternalInitializeDeflate(true); + } + + /// + /// Initialize the ZlibCodec for deflation operation, using the specified + /// CompressionLevel, the specified number of window bits, and the explicit flag + /// governing whether to emit an RFC1950 header byte pair. + /// + /// + /// The compression level for the codec. + /// whether to emit an initial RFC1950 byte pair in the compressed stream. + /// the number of window bits to use. If you don't know what this means, don't use this method. + /// Z_OK if all goes well. 
+ public int InitializeDeflate(CompressionLevel level, int bits, bool wantRfc1950Header) + { + this.CompressLevel = level; + this.WindowBits = bits; + return _InternalInitializeDeflate(wantRfc1950Header); + } + + private int _InternalInitializeDeflate(bool wantRfc1950Header) + { + if (istate != null) throw new ZlibException("You may not call InitializeDeflate() after calling InitializeInflate()."); + dstate = new DeflateManager(); + dstate.WantRfc1950HeaderBytes = wantRfc1950Header; + + return dstate.Initialize(this, this.CompressLevel, this.WindowBits, this.Strategy); + } + + /// + /// Deflate one batch of data. + /// + /// + /// You must have set InputBuffer and OutputBuffer before calling this method. + /// + /// + /// + /// private void DeflateBuffer(CompressionLevel level) + /// { + /// int bufferSize = 1024; + /// byte[] buffer = new byte[bufferSize]; + /// ZlibCodec compressor = new ZlibCodec(); + /// + /// Console.WriteLine("\n============================================"); + /// Console.WriteLine("Size of Buffer to Deflate: {0} bytes.", UncompressedBytes.Length); + /// MemoryStream ms = new MemoryStream(); + /// + /// int rc = compressor.InitializeDeflate(level); + /// + /// compressor.InputBuffer = UncompressedBytes; + /// compressor.NextIn = 0; + /// compressor.AvailableBytesIn = UncompressedBytes.Length; + /// + /// compressor.OutputBuffer = buffer; + /// + /// // pass 1: deflate + /// do + /// { + /// compressor.NextOut = 0; + /// compressor.AvailableBytesOut = buffer.Length; + /// rc = compressor.Deflate(FlushType.None); + /// + /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + /// throw new Exception("deflating: " + compressor.Message); + /// + /// ms.Write(compressor.OutputBuffer, 0, buffer.Length - compressor.AvailableBytesOut); + /// } + /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0); + /// + /// // pass 2: finish and flush + /// do + /// { + /// compressor.NextOut = 0; + /// compressor.AvailableBytesOut = buffer.Length; + /// rc = compressor.Deflate(FlushType.Finish); + /// + /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) + /// throw new Exception("deflating: " + compressor.Message); + /// + /// if (buffer.Length - compressor.AvailableBytesOut > 0) + /// ms.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut); + /// } + /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0); + /// + /// compressor.EndDeflate(); + /// + /// ms.Seek(0, SeekOrigin.Begin); + /// CompressedBytes = new byte[compressor.TotalBytesOut]; + /// ms.Read(CompressedBytes, 0, CompressedBytes.Length); + /// } + /// + /// + /// whether to flush all data as you deflate. Generally you will want to + /// use Z_NO_FLUSH here, in a series of calls to Deflate(), and then call EndDeflate() to + /// flush everything. + /// + /// Z_OK if all goes well. + public int Deflate(FlushType flush) + { + if (dstate == null) + throw new ZlibException("No Deflate State!"); + return dstate.Deflate(flush); + } + + /// + /// End a deflation session. + /// + /// + /// Call this after making a series of one or more calls to Deflate(). All buffers are flushed. + /// + /// Z_OK if all goes well. + public int EndDeflate() + { + if (dstate == null) + throw new ZlibException("No Deflate State!"); + // TODO: dinoch Tue, 03 Nov 2009 15:39 (test this) + //int ret = dstate.End(); + dstate = null; + return ZlibConstants.Z_OK; //ret; + } + + /// + /// Reset a codec for another deflation session. 
+ /// + /// + /// Call this to reset the deflation state. For example if a thread is deflating + /// non-consecutive blocks, you can call Reset() after the Deflate(Sync) of the first + /// block and before the next Deflate(None) of the second block. + /// + /// Z_OK if all goes well. + public void ResetDeflate() + { + if (dstate == null) + throw new ZlibException("No Deflate State!"); + dstate.Reset(); + } + + + /// + /// Set the CompressionStrategy and CompressionLevel for a deflation session. + /// + /// the level of compression to use. + /// the strategy to use for compression. + /// Z_OK if all goes well. + public int SetDeflateParams(CompressionLevel level, CompressionStrategy strategy) + { + if (dstate == null) + throw new ZlibException("No Deflate State!"); + return dstate.SetParams(level, strategy); + } + + + /// + /// Set the dictionary to be used for either Inflation or Deflation. + /// + /// The dictionary bytes to use. + /// Z_OK if all goes well. + public int SetDictionary(byte[] dictionary) + { + if (istate != null) + return istate.SetDictionary(dictionary); + + if (dstate != null) + return dstate.SetDictionary(dictionary); + + throw new ZlibException("No Inflate or Deflate state!"); + } + + // Flush as much pending output as possible. All deflate() output goes + // through this function so some applications may wish to modify it + // to avoid allocating a large strm->next_out buffer and copying into it. + // (See also read_buf()). + internal void flush_pending() + { + int len = dstate.pendingCount; + + if (len > AvailableBytesOut) + len = AvailableBytesOut; + if (len == 0) + return; + + if (dstate.pending.Length <= dstate.nextPending || + OutputBuffer.Length <= NextOut || + dstate.pending.Length < (dstate.nextPending + len) || + OutputBuffer.Length < (NextOut + len)) + { + throw new ZlibException(String.Format("Invalid State. (pending.Length={0}, pendingCount={1})", + dstate.pending.Length, dstate.pendingCount)); + } + + Array.Copy(dstate.pending, dstate.nextPending, OutputBuffer, NextOut, len); + + NextOut += len; + dstate.nextPending += len; + TotalBytesOut += len; + AvailableBytesOut -= len; + dstate.pendingCount -= len; + if (dstate.pendingCount == 0) + { + dstate.nextPending = 0; + } + } + + // Read a new buffer from the current input stream, update the adler32 + // and total number of bytes read. All deflate() input goes through + // this function so some applications may wish to modify it to avoid + // allocating a large strm->next_in buffer and copying from it. + // (See also flush_pending()). + internal int read_buf(byte[] buf, int start, int size) + { + int len = AvailableBytesIn; + + if (len > size) + len = size; + if (len == 0) + return 0; + + AvailableBytesIn -= len; + + if (dstate.WantRfc1950HeaderBytes) + { + _Adler32 = Adler.Adler32(_Adler32, InputBuffer, NextIn, len); + } + Array.Copy(InputBuffer, NextIn, buf, start, len); + NextIn += len; + TotalBytesIn += len; + return len; + } + + } +} \ No newline at end of file diff --git a/NBToolkit/NBToolkit/Zlib/ZlibConstants.cs b/NBToolkit/NBToolkit/Zlib/ZlibConstants.cs new file mode 100644 index 0000000..7952ce2 --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/ZlibConstants.cs @@ -0,0 +1,128 @@ +// ZlibConstants.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. 
+// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-November-03 18:50:19> +// +// ------------------------------------------------------------------ +// +// This module defines constants used by the zlib class library. This +// code is derived from the jzlib implementation of zlib, but +// significantly modified. In keeping with the license for jzlib, the +// copyright to that code is included here. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + + +using System; + +namespace Ionic.Zlib +{ + /// + /// A bunch of constants used in the Zlib interface. + /// + public static class ZlibConstants + { + /// + /// The maximum number of window bits for the Deflate algorithm. + /// + public const int WindowBitsMax = 15; // 32K LZ77 window + + /// + /// The default number of window bits for the Deflate algorithm. + /// + public const int WindowBitsDefault = WindowBitsMax; + + /// + /// indicates everything is A-OK + /// + public const int Z_OK = 0; + + /// + /// Indicates that the last operation reached the end of the stream. + /// + public const int Z_STREAM_END = 1; + + /// + /// The operation ended in need of a dictionary. + /// + public const int Z_NEED_DICT = 2; + + /// + /// There was an error with the stream - not enough data, not open and readable, etc. 
+ /// + public const int Z_STREAM_ERROR = -2; + + /// + /// There was an error with the data - not enough data, bad data, etc. + /// + public const int Z_DATA_ERROR = -3; + + /// + /// There was an error with the working buffer. + /// + public const int Z_BUF_ERROR = -5; + + /// + /// The size of the working buffer used in the ZlibCodec class. Defaults to 8192 bytes. + /// +#if NETCF + public const int WorkingBufferSizeDefault = 8192; +#else + public const int WorkingBufferSizeDefault = 16384; +#endif + /// + /// The minimum size of the working buffer used in the ZlibCodec class. Currently it is 128 bytes. + /// + public const int WorkingBufferSizeMin = 1024; + } + +} + diff --git a/NBToolkit/NBToolkit/Zlib/ZlibStream.cs b/NBToolkit/NBToolkit/Zlib/ZlibStream.cs new file mode 100644 index 0000000..0c9a95a --- /dev/null +++ b/NBToolkit/NBToolkit/Zlib/ZlibStream.cs @@ -0,0 +1,682 @@ +// ZlibStream.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2010-January-09 12:03:25> +// +// ------------------------------------------------------------------ +// +// This module defines the ZlibStream class, which is similar in idea to +// the System.IO.Compression.DeflateStream and +// System.IO.Compression.GZipStream classes in the .NET BCL. +// +// ------------------------------------------------------------------ + +using System; +using System.IO; + +namespace Ionic.Zlib +{ + + /// + /// Represents a Zlib stream for compression or decompression. + /// + /// + /// + /// + /// The ZlibStream is a Decorator on a . It adds ZLIB compression or decompression to any + /// stream. + /// + /// + /// Using this stream, applications can compress or decompress data via + /// stream Read() and Write() operations. Either compresssion or + /// decompression can occur through either reading or writing. The compression + /// format used is ZLIB, which is documented in IETF RFC 1950, "ZLIB Compressed + /// Data Format Specification version 3.3". This implementation of ZLIB always uses + /// DEFLATE as the compression method. (see IETF RFC 1951, "DEFLATE + /// Compressed Data Format Specification version 1.3.") + /// + /// + /// The ZLIB format allows for varying compression methods, window sizes, and dictionaries. + /// This implementation always uses the DEFLATE compression method, a preset dictionary, + /// and 15 window bits by default. + /// + /// + /// + /// This class is similar to , except that it adds the + /// RFC1950 header and trailer bytes to a compressed stream when compressing, or expects + /// the RFC1950 header and trailer bytes when decompressing. It is also similar to the + /// . + /// + /// + /// + /// + public class ZlibStream : System.IO.Stream + { + internal ZlibBaseStream _baseStream; + bool _disposed; + + /// + /// Create a ZlibStream using the specified CompressionMode. + /// + /// + /// + /// + /// When mode is CompressionMode.Compress, the ZlibStream will use the + /// default compression level. 
The "captive" stream will be closed when the + /// ZlibStream is closed. + /// + /// + /// + /// + /// + /// This example uses a ZlibStream to compress a file, and writes the compressed + /// data to another file. + /// + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (var raw = System.IO.File.Create(fileToCompress + ".zlib")) + /// { + /// using (Stream compressor = new ZlibStream(raw, CompressionMode.Compress)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n; + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// compressor.Write(buffer, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using raw As FileStream = File.Create(fileToCompress & ".zlib") + /// Using compressor As Stream = New ZlibStream(raw, CompressionMode.Compress) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// End Using + /// + /// + /// + /// The stream which will be read or written. + /// Indicates whether the ZlibStream will compress or decompress. + public ZlibStream(System.IO.Stream stream, CompressionMode mode) + : this(stream, mode, CompressionLevel.Default, false) + { + } + + /// + /// Create a ZlibStream using the specified CompressionMode and + /// the specified CompressionLevel. + /// + /// + /// + /// + /// + /// When mode is CompressionMode.Decompress, the level parameter is ignored. + /// The "captive" stream will be closed when the ZlibStream is closed. + /// + /// + /// + /// + /// + /// This example uses a ZlibStream to compress data from a file, and writes the + /// compressed data to another file. + /// + /// + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (var raw = System.IO.File.Create(fileToCompress + ".zlib")) + /// { + /// using (Stream compressor = new ZlibStream(raw, + /// CompressionMode.Compress, + /// CompressionLevel.BestCompression)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n; + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// compressor.Write(buffer, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using raw As FileStream = File.Create(fileToCompress & ".zlib") + /// Using compressor As Stream = New ZlibStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// End Using + /// + /// + /// + /// The stream to be read or written while deflating or inflating. + /// Indicates whether the ZlibStream will compress or decompress. + /// A tuning knob to trade speed for effectiveness. + public ZlibStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level) + : this(stream, mode, level, false) + { + } + + /// + /// Create a ZlibStream using the specified CompressionMode, and + /// explicitly specify whether the captive stream should be left open after + /// Deflation or Inflation. 
+ /// + /// + /// + /// + /// + /// When mode is CompressionMode.Compress, the ZlibStream will use + /// the default compression level. + /// + /// + /// + /// This constructor allows the application to request that the captive stream + /// remain open after the deflation or inflation occurs. By default, after + /// Close() is called on the stream, the captive stream is also + /// closed. In some cases this is not desired, for example if the stream is a + /// that will be re-read after + /// compression. Specify true for the parameter to leave the stream + /// open. + /// + /// + /// + /// See the other overloads of this constructor for example code. + /// + /// + /// + /// + /// The stream which will be read or written. This is called the + /// "captive" stream in other places in this documentation. + /// Indicates whether the ZlibStream will compress or decompress. + /// true if the application would like the stream to remain + /// open after inflation/deflation. + public ZlibStream(System.IO.Stream stream, CompressionMode mode, bool leaveOpen) + : this(stream, mode, CompressionLevel.Default, leaveOpen) + { + } + + /// + /// Create a ZlibStream using the specified CompressionMode and + /// the specified CompressionLevel, and explicitly specify whether the + /// stream should be left open after Deflation or Inflation. + /// + /// + /// + /// + /// + /// This constructor allows the application to request that the captive stream + /// remain open after the deflation or inflation occurs. By default, after + /// Close() is called on the stream, the captive stream is also closed. In + /// some cases this is not desired, for example if the stream is a that will be re-read after compression. + /// Specify true for the parameter to leave the stream open. + /// + /// + /// + /// When mode is CompressionMode.Decompress, the level parameter is ignored. + /// + /// + /// + /// + /// + /// This example shows how to use a ZlibStream to compress the data from a file, + /// and store the result into another file. The filestream remains open to allow + /// additional data to be written to it. + /// + /// using (var output = System.IO.File.Create(fileToCompress + ".zlib")) + /// { + /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress)) + /// { + /// using (Stream compressor = new ZlibStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, true)) + /// { + /// byte[] buffer = new byte[WORKING_BUFFER_SIZE]; + /// int n; + /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0) + /// { + /// compressor.Write(buffer, 0, n); + /// } + /// } + /// } + /// // can write additional data to the output stream here + /// } + /// + /// + /// Using output As FileStream = File.Create(fileToCompress & ".zlib") + /// Using input As Stream = File.OpenRead(fileToCompress) + /// Using compressor As Stream = New ZlibStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, True) + /// Dim buffer As Byte() = New Byte(4096) {} + /// Dim n As Integer = -1 + /// Do While (n <> 0) + /// If (n > 0) Then + /// compressor.Write(buffer, 0, n) + /// End If + /// n = input.Read(buffer, 0, buffer.Length) + /// Loop + /// End Using + /// End Using + /// ' can write additional data to the output stream here. + /// End Using + /// + /// + /// + /// The stream which will be read or written. + /// + /// Indicates whether the ZlibStream will compress or decompress. + /// + /// + /// true if the application would like the stream to remain open after inflation/deflation. 
+ /// + /// + /// + /// A tuning knob to trade speed for effectiveness. This parameter is effective only when + /// mode is CompressionMode.Compress. + /// + public ZlibStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen) + { + _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen); + } + + #region Zlib properties + + /// + /// This property sets the flush behavior on the stream. + /// Sorry, though, not sure exactly how to describe all the various settings. + /// + virtual public FlushType FlushMode + { + get { return (this._baseStream._flushMode); } + set + { + if (_disposed) throw new ObjectDisposedException("ZlibStream"); + this._baseStream._flushMode = value; + } + } + + /// + /// The size of the working buffer for the compression codec. + /// + /// + /// + /// + /// The working buffer is used for all stream operations. The default size is + /// 1024 bytes. The minimum size is 128 bytes. You may get better performance + /// with a larger buffer. Then again, you might not. You would have to test + /// it. + /// + /// + /// + /// Set this before the first call to Read() or Write() on the + /// stream. If you try to set it afterwards, it will throw. + /// + /// + public int BufferSize + { + get + { + return this._baseStream._bufferSize; + } + set + { + if (_disposed) throw new ObjectDisposedException("ZlibStream"); + if (this._baseStream._workingBuffer != null) + throw new ZlibException("The working buffer is already set."); + if (value < ZlibConstants.WorkingBufferSizeMin) + throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin)); + this._baseStream._bufferSize = value; + } + } + + /// Returns the total number of bytes input so far. + virtual public long TotalIn + { + get { return this._baseStream._z.TotalBytesIn; } + } + + /// Returns the total number of bytes output so far. + virtual public long TotalOut + { + get { return this._baseStream._z.TotalBytesOut; } + } + + #endregion + + #region System.IO.Stream methods + + /// + /// Dispose the stream. + /// + /// + /// This may or may not result in a Close() call on the captive stream. + /// See the constructors that have a leaveOpen parameter for more information. + /// + protected override void Dispose(bool disposing) + { + try + { + if (!_disposed) + { + if (disposing && (this._baseStream != null)) + this._baseStream.Close(); + _disposed = true; + } + } + finally + { + base.Dispose(disposing); + } + } + + + /// + /// Indicates whether the stream can be read. + /// + /// + /// The return value depends on whether the captive stream supports reading. + /// + public override bool CanRead + { + get + { + if (_disposed) throw new ObjectDisposedException("ZlibStream"); + return _baseStream._stream.CanRead; + } + } + + /// + /// Indicates whether the stream supports Seek operations. + /// + /// + /// Always returns false. + /// + public override bool CanSeek + { + get { return false; } + } + + /// + /// Indicates whether the stream can be written. + /// + /// + /// The return value depends on whether the captive stream supports writing. + /// + public override bool CanWrite + { + get + { + if (_disposed) throw new ObjectDisposedException("ZlibStream"); + return _baseStream._stream.CanWrite; + } + } + + /// + /// Flush the stream. 
+ /// + public override void Flush() + { + if (_disposed) throw new ObjectDisposedException("ZlibStream"); + _baseStream.Flush(); + } + + /// + /// Reading this property always throws a . + /// + public override long Length + { + get { throw new NotImplementedException(); } + } + + /// + /// The position of the stream pointer. + /// + /// + /// + /// Setting this property always throws a . Reading will return the total bytes + /// written out, if used in writing, or the total bytes read in, if used in + /// reading. The count may refer to compressed bytes or uncompressed bytes, + /// depending on how you've used the stream. + /// + public override long Position + { + get + { + if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Writer) + return this._baseStream._z.TotalBytesOut; + if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Reader) + return this._baseStream._z.TotalBytesIn; + return 0; + } + + set { throw new NotImplementedException(); } + } + + /// + /// Read data from the stream. + /// + /// + /// + /// + /// + /// If you wish to use the ZlibStream to compress data while reading, + /// you can create a ZlibStream with CompressionMode.Compress, + /// providing an uncompressed data stream. Then call Read() on that + /// ZlibStream, and the data read will be compressed. If you wish to + /// use the ZlibStream to decompress data while reading, you can create + /// a ZlibStream with CompressionMode.Decompress, providing a + /// readable compressed data stream. Then call Read() on that + /// ZlibStream, and the data will be decompressed as it is read. + /// + /// + /// + /// A ZlibStream can be used for Read() or Write(), but + /// not both. + /// + /// + /// + /// The buffer into which the read data should be placed. + /// the offset within that data array to put the first byte read. + /// the number of bytes to read. + public override int Read(byte[] buffer, int offset, int count) + { + if (_disposed) throw new ObjectDisposedException("ZlibStream"); + return _baseStream.Read(buffer, offset, count); + } + + /// + /// Calling this method always throws a . + /// + public override long Seek(long offset, System.IO.SeekOrigin origin) + { + throw new NotImplementedException(); + } + + /// + /// Calling this method always throws a . + /// + public override void SetLength(long value) + { + throw new NotImplementedException(); + } + + /// + /// Write data to the stream. + /// + /// + /// + /// + /// + /// If you wish to use the ZlibStream to compress data while writing, + /// you can create a ZlibStream with CompressionMode.Compress, + /// and a writable output stream. Then call Write() on that + /// ZlibStream, providing uncompressed data as input. The data sent to + /// the output stream will be the compressed form of the data written. If you + /// wish to use the ZlibStream to decompress data while writing, you + /// can create a ZlibStream with CompressionMode.Decompress, and a + /// writable output stream. Then call Write() on that stream, + /// providing previously compressed data. The data sent to the output stream + /// will be the decompressed form of the data written. + /// + /// + /// + /// A ZlibStream can be used for Read() or Write(), but not both. + /// + /// + /// The buffer holding data to write to the stream. + /// the offset within that data array to find the first byte to write. + /// the number of bytes to write. 
+ public override void Write(byte[] buffer, int offset, int count) + { + if (_disposed) throw new ObjectDisposedException("ZlibStream"); + _baseStream.Write(buffer, offset, count); + } + #endregion + + + /// + /// Compress a string into a byte array using ZLIB. + /// + /// + /// + /// Uncompress it with . + /// + /// + /// + /// + /// + /// + /// A string to compress. The string will first be encoded + /// using UTF8, then compressed. + /// + /// + /// The string in compressed form + public static byte[] CompressString(String s) + { + using (var ms = new MemoryStream()) + { + Stream compressor = + new ZlibStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression); + ZlibBaseStream.CompressString(s, compressor); + return ms.ToArray(); + } + } + + + /// + /// Compress a byte array into a new byte array using ZLIB. + /// + /// + /// + /// Uncompress it with . + /// + /// + /// + /// + /// + /// + /// A buffer to compress. + /// + /// + /// The data in compressed form + public static byte[] CompressBuffer(byte[] b) + { + using (var ms = new MemoryStream()) + { + Stream compressor = + new ZlibStream( ms, CompressionMode.Compress, CompressionLevel.BestCompression ); + + ZlibBaseStream.CompressBuffer(b, compressor); + return ms.ToArray(); + } + } + + + /// + /// Uncompress a ZLIB-compressed byte array into a single string. + /// + /// + /// + /// + /// + /// + /// A buffer containing ZLIB-compressed data. + /// + /// + /// The uncompressed string + public static String UncompressString(byte[] compressed) + { + using (var input = new MemoryStream(compressed)) + { + Stream decompressor = + new ZlibStream(input, CompressionMode.Decompress); + + return ZlibBaseStream.UncompressString(compressed, decompressor); + } + } + + + /// + /// Uncompress a ZLIB-compressed byte array into a byte array. + /// + /// + /// + /// + /// + /// + /// A buffer containing ZLIB-compressed data. 
+        ///
+        ///
+        /// The data in uncompressed form
+        public static byte[] UncompressBuffer(byte[] compressed)
+        {
+            using (var input = new MemoryStream(compressed))
+            {
+                Stream decompressor =
+                    new ZlibStream( input, CompressionMode.Decompress );
+
+                return ZlibBaseStream.UncompressBuffer(compressed, decompressor);
+            }
+        }
+
+    }
+
+
+}
\ No newline at end of file
diff --git a/NBToolkit/chunk.h b/NBToolkit/chunk.h
index e8a36ab..591605e 100644
--- a/NBToolkit/chunk.h
+++ b/NBToolkit/chunk.h
@@ -96,5 +96,31 @@ void chunk_to_coords (char * str, struct chunk_coords * cc);
 int update_chunk (char * file, char * name, pf_type pf, struct options * opt, void * pf_opt);
 int update_all_chunks (char *path, pf_type pf, struct options * opt, void * pf_opt);
 
+class MCChunk {
+protected:
+
+	// Chunk coordinates (absolute coordinates)
+	int _cx;
+	int _cz;
+
+public:
+
+};
+
+class MCRegionChunk : public MCChunk {
+protected:
+
+	MCRegion * _region;
+
+};
+
+class MCFileChunk : public MCChunk {
+protected:
+
+	// Path to chunk file
+	std::string _path;
+};
+
+
 #endif
diff --git a/NBToolkit/main.c b/NBToolkit/main.c
index 475f578..79f3f41 100644
--- a/NBToolkit/main.c
+++ b/NBToolkit/main.c
@@ -41,7 +41,9 @@ const char * basename (const char *path) {
 void print_usage (const char *name) {
 	fprintf(stderr, "Usage: %s <tool> [options]\n", basename(name));
 	fprintf(stderr, "Available tools:\n");
+	fprintf(stderr, "    list    : List chunks that match search criteria\n");
 	fprintf(stderr, "    oregen  : Generate new ore deposits\n");
+	fprintf(stderr, "    prune   : Delete chunks matching search criteria\n");
 	fprintf(stderr, "    replace : Replace one block type with another\n\n");
 	fprintf(stderr, "Usage and options for 'oregen':\n");
 	fprintf(stderr, "    %s oregen <world> [options]\n\n", basename(name));
@@ -353,3 +355,23 @@ int main(int argc, char **argv)
 		}
 	}
 }
+
+// Commands to add:
+//   - List Chunks
+//   - Prune Chunks
+//   - Dump Entities
+//   - Clear Entities
+//   - Relight Chunks
+
+// Options to add:
+//   -la  Light Above
+//   -lb  Light Below
+//   -pa  Block Above
+//   -pb  Block Below
+//   -ps  Block to Side
+//   -de  Data Equals
+//   -dn  Data Not Equals
+//
+// Replace options:
+//   -mh  Y at heightmap
+// -
\ No newline at end of file
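
For a quick sanity check of the ported Zlib classes, the sketch below round-trips a buffer through the static ZlibStream.CompressBuffer and ZlibStream.UncompressBuffer helpers added in this patch. It is an illustrative harness only, not part of the patch itself: the ZlibRoundTrip class name and the sample text are hypothetical, and the program assumes the Ionic.Zlib sources above compile as-is.

using System;
using System.Text;
using Ionic.Zlib;

// Hypothetical smoke test (not included in the patch): compress a small
// buffer with ZLIB (RFC1950) framing and verify it decompresses back to
// the original bytes.
class ZlibRoundTrip
{
    static void Main()
    {
        string sample = "NBToolkit zlib round-trip check";
        byte[] original = Encoding.UTF8.GetBytes(sample);

        // CompressBuffer writes the input through a ZlibStream opened in
        // CompressionMode.Compress and returns the compressed bytes.
        byte[] compressed = ZlibStream.CompressBuffer(original);

        // UncompressBuffer reads the compressed bytes back through a
        // ZlibStream opened in CompressionMode.Decompress.
        byte[] restored = ZlibStream.UncompressBuffer(compressed);

        Console.WriteLine("original:   {0} bytes", original.Length);
        Console.WriteLine("compressed: {0} bytes", compressed.Length);
        Console.WriteLine("round-trip ok: {0}", sample == Encoding.UTF8.GetString(restored));
    }
}

The same round trip can be done for text payloads with the CompressString and UncompressString helpers defined alongside CompressBuffer in ZlibStream.cs.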