View Javadoc

1   /*
2    * Copyright 2014 The Netty Project
3    *
4    * The Netty Project licenses this file to you under the Apache License,
5    * version 2.0 (the "License"); you may not use this file except in compliance
6    * with the License. You may obtain a copy of the License at:
7    *
8    *   http://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12   * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13   * License for the specific language governing permissions and limitations
14   * under the License.
15   */
16  package io.netty.handler.codec.compression;
17  
18  import com.ning.compress.BufferRecycler;
19  import com.ning.compress.lzf.ChunkEncoder;
20  import com.ning.compress.lzf.LZFEncoder;
21  import com.ning.compress.lzf.util.ChunkEncoderFactory;
22  import io.netty.buffer.ByteBuf;
23  import io.netty.channel.ChannelHandlerContext;
24  import io.netty.handler.codec.MessageToByteEncoder;
25  
26  import static com.ning.compress.lzf.LZFChunk.*;
27  
/**
 * Compresses a {@link ByteBuf} using the LZF format.
 *
 * See original <a href="http://oldhome.schmorp.de/marc/liblzf.html">LZF package</a>
 * and <a href="https://github.com/ning/compress/wiki/LZFFormat">LZF format</a> for full description.
 */
34  public class LzfEncoder extends MessageToByteEncoder<ByteBuf> {
35      /**
36       * Minimum block size ready for compression. Blocks with length
37       * less than [email protected] #MIN_BLOCK_TO_COMPRESS} will write as uncompressed.
38       */
39      private static final int MIN_BLOCK_TO_COMPRESS = 16;
40  
41      /**
42       * Underlying decoder in use.
43       */
44      private final ChunkEncoder encoder;
45  
46      /**
47       * Object that handles details of buffer recycling.
48       */
49      private final BufferRecycler recycler;
50  
51      /**
52       * Creates a new LZF encoder with the most optimal available methods for underlying data access.
53       * It will "unsafe" instance if one can be used on current JVM.
54       * It should be safe to call this constructor as implementations are dynamically loaded; however, on some
55       * non-standard platforms it may be necessary to use [email protected] #LzfEncoder(boolean)} with [email protected] true} param.
56       */
57      public LzfEncoder() {
58          this(false, MAX_CHUNK_LEN);
59      }
60  
61      /**
62       * Creates a new LZF encoder with specified encoding instance.
63       *
64       * @param safeInstance
65       *        If [email protected] true} encoder will use [email protected] ChunkEncoder} that only uses standard JDK access methods,
66       *        and should work on all Java platforms and JVMs.
67       *        Otherwise encoder will try to use highly optimized [email protected] ChunkEncoder} implementation that uses
68       *        Sun JDK's [email protected] sun.misc.Unsafe} class (which may be included by other JDK's as well).
69       */
70      public LzfEncoder(boolean safeInstance) {
71          this(safeInstance, MAX_CHUNK_LEN);
72      }
73  
74      /**
75       * Creates a new LZF encoder with specified total length of encoded chunk. You can configure it to encode
76       * your data flow more efficient if you know the avarage size of messages that you send.
77       *
78       * @param totalLength
79       *        Expected total length of content to compress; only matters for outgoing messages that is smaller
80       *        than maximum chunk size (64k), to optimize encoding hash tables.
81       */
82      public LzfEncoder(int totalLength) {
83          this(false, totalLength);
84      }
85  
86      /**
87       * Creates a new LZF encoder with specified settings.
88       *
89       * @param safeInstance
90       *        If [email protected] true} encoder will use [email protected] ChunkEncoder} that only uses standard JDK access methods,
91       *        and should work on all Java platforms and JVMs.
92       *        Otherwise encoder will try to use highly optimized [email protected] ChunkEncoder} implementation that uses
93       *        Sun JDK's [email protected] sun.misc.Unsafe} class (which may be included by other JDK's as well).
94       * @param totalLength
95       *        Expected total length of content to compress; only matters for outgoing messages that is smaller
96       *        than maximum chunk size (64k), to optimize encoding hash tables.
97       */
98      public LzfEncoder(boolean safeInstance, int totalLength) {
99          super(false);
100         if (totalLength < MIN_BLOCK_TO_COMPRESS || totalLength > MAX_CHUNK_LEN) {
101             throw new IllegalArgumentException("totalLength: " + totalLength +
102                     " (expected: " + MIN_BLOCK_TO_COMPRESS + '-' + MAX_CHUNK_LEN + ')');
103         }
104 
105         encoder = safeInstance ?
106                 ChunkEncoderFactory.safeNonAllocatingInstance(totalLength)
107               : ChunkEncoderFactory.optimalNonAllocatingInstance(totalLength);
108 
109         recycler = BufferRecycler.instance();
110     }
111 
112     @Override
113     protected void encode(ChannelHandlerContext ctx, ByteBuf in, ByteBuf out) throws Exception {
114         final int length = in.readableBytes();
115         final int idx = in.readerIndex();
116         final byte[] input;
117         final int inputPtr;
118         if (in.hasArray()) {
119             input = in.array();
120             inputPtr = in.arrayOffset() + idx;
121         } else {
122             input = recycler.allocInputBuffer(length);
123             in.getBytes(idx, input, 0, length);
124             inputPtr = 0;
125         }
126 
127         final int maxOutputLength = LZFEncoder.estimateMaxWorkspaceSize(length);
128         out.ensureWritable(maxOutputLength);
129         final byte[] output = out.array();
130         final int outputPtr = out.arrayOffset() + out.writerIndex();
131         final int outputLength = LZFEncoder.appendEncoded(encoder,
132                         input, inputPtr, length,  output, outputPtr) - outputPtr;
133         out.writerIndex(out.writerIndex() + outputLength);
134         in.skipBytes(length);
135 
136         if (!in.hasArray()) {
137             recycler.releaseInputBuffer(input);
138         }
139     }
140 }