diff --git a/.classpath b/.classpath
index 81bbbfc6da7..7b5916556a0 100644
--- a/.classpath
+++ b/.classpath
@@ -7,30 +7,9 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
diff --git a/.project b/.project
index 572b2b3c1e7..f8629a548c9 100644
--- a/.project
+++ b/.project
@@ -43,5 +43,6 @@
<nature>org.sonar.ide.eclipse.core.sonarNature</nature>
<nature>sf.eclipse.javacc.javaccnature</nature>
<nature>net.sf.eclipsecs.core.CheckstyleNature</nature>
+ <nature>org.apache.ivyde.eclipse.ivynature</nature>
diff --git a/.travis.yml b/.travis.yml
index f6911d20991..88a2b6c5984 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,8 +15,10 @@ before_install:
- >
case "${TRAVIS_OS_NAME:-linux}" in
linux)
+ ant resolve
;;
osx)
brew update
brew install ant
+ ant resolve
;;
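The `ant resolve` step added above presupposes a matching target in build.xml. A minimal sketch of such a target wired to Apache Ivy follows; the retrieve pattern and destination directory are assumptions for illustration, not necessarily JOSM's actual wiring:

    <project name="josm" xmlns:ivy="antlib:org.apache.ivy.ant">
        <!-- Reads ivy.xml next to build.xml and copies the resolved jars into the project -->
        <target name="resolve" description="Download build dependencies declared in ivy.xml">
            <ivy:retrieve pattern="tools/lib/[artifact]-[revision].[ext]"/>
        </target>
    </project>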
diff --git a/build.xml b/build.xml
index 1d20d1b0a54..e0471bc0e89 100644
--- a/build.xml
+++ b/build.xml
@@ -8,8 +8,8 @@
** https://josm.openstreetmap.de/wiki/DevelopersGuide/CreateBuild
**
-->
-
-
+
+
-
+
@@ -116,7 +109,7 @@
-
+
@@ -136,7 +129,7 @@
-
+
@@ -172,6 +165,14 @@ Build-Date: ${build.tstamp}
+
+
+
+
+
+
+
+
@@ -189,13 +190,61 @@ Build-Date: ${build.tstamp}
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
-
+
+
@@ -254,9 +304,10 @@ Build-Date: ${build.tstamp}
+
-
+
@@ -267,92 +318,26 @@ Build-Date: ${build.tstamp}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
-
+
-
+
+
+
+
@@ -392,12 +377,13 @@ Build-Date: ${build.tstamp}
JOSM - Javadoc]]>
@@ -412,7 +398,7 @@ Build-Date: ${build.tstamp}
-
+
@@ -422,6 +408,9 @@ Build-Date: ${build.tstamp}
+
@@ -431,7 +420,7 @@ Build-Date: ${build.tstamp}
-
+
@@ -440,7 +429,8 @@ Build-Date: ${build.tstamp}
-
+
+
@@ -467,7 +457,7 @@ Build-Date: ${build.tstamp}
-
+
@@ -578,11 +568,14 @@ Build-Date: ${build.tstamp}
-
+
+
+
-injars ${dist.jar}
-outjars ${dist-optimized.jar}
+ -libraryjars ${library.jars}
-libraryjars ${java.home}/lib
-dontoptimize
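The new `-libraryjars ${library.jars}` option implies an Ant property that joins the resolved dependency jars into a single ProGuard class path. A hedged sketch of how such a property could be assembled (directory and id names are assumptions):

    <path id="library.jars.path">
        <fileset dir="tools/lib" includes="*.jar"/>
    </path>
    <!-- ProGuard expects a path-separator-joined list, e.g. a.jar:b.jar on Unix -->
    <pathconvert property="library.jars" refid="library.jars.path" pathsep="${path.separator}"/>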
@@ -647,8 +640,9 @@ Build-Date: ${build.tstamp}
+
-
+
@@ -718,7 +712,7 @@ Build-Date: ${build.tstamp}
-
+
@@ -786,7 +780,12 @@ Build-Date: ${build.tstamp}
-
+
+
+
+
+
+
@@ -800,7 +799,7 @@ Build-Date: ${build.tstamp}
-
+
<_taginfo type="mappaint" output="taginfo_style.json"/>
@@ -808,10 +807,14 @@ Build-Date: ${build.tstamp}
<_taginfo type="external_presets" output="taginfo_external_presets.json"/>
-
-
+
+
-
+
+
+
+
+
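The `<_taginfo>` elements above are calls to an Ant macro. A macro of that shape would be declared with `<macrodef>` roughly as follows; only the `type` and `output` attributes come from the diff, and the body (extractor class name and arguments) is hypothetical:

    <macrodef name="_taginfo">
        <attribute name="type"/>
        <attribute name="output"/>
        <sequential>
            <!-- hypothetical body: run a taginfo extractor for @{type} and write @{output} -->
            <java classname="TagInfoExtract" fork="true" failonerror="true">
                <arg value="--type"/>
                <arg value="@{type}"/>
                <arg value="-o"/>
                <arg value="@{output}"/>
            </java>
        </sequential>
    </macrodef>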
@@ -841,28 +844,37 @@ Build-Date: ${build.tstamp}
+
+
+
+
+
+
+
+
+
-
+
+ encoding="UTF-8" classpathref="checkstyle.path">
+
-
+
-
+
-
-
+
+
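The surviving fragment `encoding="UTF-8" classpathref="checkstyle.path"` belongs to an invocation of the Checkstyle Ant task. A typical invocation looks roughly like the sketch below; the config file, fileset, and formatter are assumptions:

    <taskdef resource="com/puppycrawl/tools/checkstyle/ant/checkstyle-ant-task.properties"
             classpathref="checkstyle.path"/>
    <checkstyle config="tools/checkstyle/josm_checks.xml"
                encoding="UTF-8" classpathref="checkstyle.path">
        <fileset dir="src" includes="**/*.java"/>
        <formatter type="xml" toFile="checkstyle-josm.xml"/>
    </checkstyle>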
@@ -873,28 +885,27 @@ Build-Date: ${build.tstamp}
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
+
+
+ ${pmd.dir}/josm-ruleset.xml
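`${pmd.dir}/josm-ruleset.xml` reads like the content of a nested `<ruleset>` element of the PMD Ant task. For illustration, a typical invocation (taskdef name, fileset, and formatter are assumptions):

    <taskdef name="pmd" classname="net.sourceforge.pmd.ant.PMDTask" classpathref="pmd.path"/>
    <pmd encoding="UTF-8">
        <ruleset>${pmd.dir}/josm-ruleset.xml</ruleset>
        <fileset dir="src" includes="org/openstreetmap/**/*.java"/>
        <formatter type="xml" toFile="pmd-josm.xml"/>
    </pmd>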
@@ -920,7 +931,7 @@ Build-Date: ${build.tstamp}
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ivy.xml b/ivy.xml
new file mode 100644
index 00000000000..19d9dbb531d
--- /dev/null
+++ b/ivy.xml
@@ -0,0 +1,60 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
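An Ivy module descriptor such as the new ivy.xml declares the module and the dependencies to resolve. As a minimal illustrative sketch only (organisation, revision, and configuration names are hypothetical, not JOSM's actual descriptor; commons-compress serves as the example because its vendored sources are removed below):

    <ivy-module version="2.0">
        <info organisation="org.openstreetmap" module="josm"/>
        <configurations>
            <conf name="runtime" description="jars bundled into the build"/>
        </configurations>
        <dependencies>
            <dependency org="org.apache.commons" name="commons-compress" rev="1.18" conf="runtime->default"/>
        </dependencies>
    </ivy-module>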
diff --git a/ivysettings.xml b/ivysettings.xml
new file mode 100644
index 00000000000..7f0c3c9bbdf
--- /dev/null
+++ b/ivysettings.xml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
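ivysettings.xml tells Ivy which repositories to resolve against. A minimal sketch pointing at Maven Central (the resolver name and m2-compatibility flag are assumptions):

    <ivysettings>
        <settings defaultResolver="central"/>
        <resolvers>
            <ibiblio name="central" m2compatible="true"/>
        </resolvers>
    </ivysettings>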
diff --git a/src/org/apache/commons/compress/MemoryLimitException.java b/src/org/apache/commons/compress/MemoryLimitException.java
deleted file mode 100644
index d251fb3f7e9..00000000000
--- a/src/org/apache/commons/compress/MemoryLimitException.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress;
-
-import java.io.IOException;
-
-/**
- * If a stream checks for estimated memory allocation, and the estimate
- * goes above the memory limit, this is thrown. This can also be thrown
- * if a stream tries to allocate a byte array that is larger than
- * the allowable limit.
- *
- * @since 1.14
- */
-public class MemoryLimitException extends IOException {
-
- private static final long serialVersionUID = 1L;
-
- //long instead of int to account for overflow for corrupt files
- private final long memoryNeededInKb;
- private final int memoryLimitInKb;
-
- public MemoryLimitException(long memoryNeededInKb, int memoryLimitInKb) {
- super(buildMessage(memoryNeededInKb, memoryLimitInKb));
- this.memoryNeededInKb = memoryNeededInKb;
- this.memoryLimitInKb = memoryLimitInKb;
- }
-
- public MemoryLimitException(long memoryNeededInKb, int memoryLimitInKb, Exception e) {
- super(buildMessage(memoryNeededInKb, memoryLimitInKb), e);
- this.memoryNeededInKb = memoryNeededInKb;
- this.memoryLimitInKb = memoryLimitInKb;
- }
-
- public long getMemoryNeededInKb() {
- return memoryNeededInKb;
- }
-
- public int getMemoryLimitInKb() {
- return memoryLimitInKb;
- }
-
- private static String buildMessage(long memoryNeededInKb, int memoryLimitInKb) {
- return memoryNeededInKb + " kb of memory would be needed; limit was "
- + memoryLimitInKb + " kb. " +
- "If the file is not corrupt, consider increasing the memory limit.";
- }
-}
diff --git a/src/org/apache/commons/compress/PasswordRequiredException.java b/src/org/apache/commons/compress/PasswordRequiredException.java
deleted file mode 100644
index d876b96b0e1..00000000000
--- a/src/org/apache/commons/compress/PasswordRequiredException.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress;
-
-import java.io.IOException;
-
-/**
- * Exception thrown when trying to read an encrypted entry or file without
- * configuring a password.
- * @since 1.10
- */
-public class PasswordRequiredException extends IOException {
-
- private static final long serialVersionUID = 1391070005491684483L;
-
- /**
- * Create a new exception.
- *
- * @param name name of the archive containing encrypted streams or
- * the encrypted file.
- */
- public PasswordRequiredException(final String name) {
- super("Cannot read encrypted content from " + name + " without a password.");
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/ArchiveEntry.java b/src/org/apache/commons/compress/archivers/ArchiveEntry.java
deleted file mode 100644
index d5fa746a65a..00000000000
--- a/src/org/apache/commons/compress/archivers/ArchiveEntry.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-import java.util.Date;
-
-/**
- * Represents an entry of an archive.
- */
-public interface ArchiveEntry {
-
- /**
- * Gets the name of the entry in this archive. May refer to a file or directory or other item.
- *
- *
- * This method returns the raw name as it is stored inside of the archive.
- *
- * @return The name of this entry in the archive.
- */
- String getName();
-
- /**
- * Gets the uncompressed size of this entry. May be -1 (SIZE_UNKNOWN) if the size is unknown
- *
- * @return the uncompressed size of this entry.
- */
- long getSize();
-
- /** Special value indicating that the size is unknown */
- long SIZE_UNKNOWN = -1;
-
- /**
- * Returns true if this entry refers to a directory.
- *
- * @return true if this entry refers to a directory.
- */
- boolean isDirectory();
-
- /**
- * Gets the last modified date of this entry.
- *
- * @return the last modified date of this entry.
- * @since 1.1
- */
- Date getLastModifiedDate();
-}
diff --git a/src/org/apache/commons/compress/archivers/ArchiveException.java b/src/org/apache/commons/compress/archivers/ArchiveException.java
deleted file mode 100644
index bb577850c06..00000000000
--- a/src/org/apache/commons/compress/archivers/ArchiveException.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-/**
- * Archiver related Exception
- */
-public class ArchiveException extends Exception {
-
- /** Serial */
- private static final long serialVersionUID = 2772690708123267100L;
-
- /**
- * Constructs a new exception with the specified detail message. The cause
- * is not initialized.
- *
- * @param message
- * the detail message
- */
- public ArchiveException(final String message) {
- super(message);
- }
-
- /**
- * Constructs a new exception with the specified detail message and cause.
- *
- * @param message
- * the detail message
- * @param cause
- * the cause
- */
- public ArchiveException(final String message, final Exception cause) {
- super(message);
- this.initCause(cause);
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/ArchiveInputStream.java b/src/org/apache/commons/compress/archivers/ArchiveInputStream.java
deleted file mode 100644
index 9c4e9782113..00000000000
--- a/src/org/apache/commons/compress/archivers/ArchiveInputStream.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * Archive input streams MUST override the
- * {@link #read(byte[], int, int)} - or {@link #read()} -
- * method so that reading from the stream generates EOF for the end of
- * data in each entry as well as at the end of the file proper.
- *
- * The {@link #getNextEntry()} method is used to reset the input stream
- * ready for reading the data from the next entry.
- *
- * The input stream classes must also implement a method with the signature:
- *
- * public static boolean matches(byte[] signature, int length)
- *
- * which is used by the {@link ArchiveStreamFactory} to autodetect
- * the archive type from the first few bytes of a stream.
- */
-public abstract class ArchiveInputStream extends InputStream {
-
- private final byte[] single = new byte[1];
- private static final int BYTE_MASK = 0xFF;
-
- /** holds the number of bytes read in this stream */
- private long bytesRead = 0;
-
- /**
- * Returns the next Archive Entry in this Stream.
- *
- * @return the next entry,
- * or {@code null} if there are no more entries
- * @throws IOException if the next entry could not be read
- */
- public abstract ArchiveEntry getNextEntry() throws IOException;
-
- /*
- * Note that subclasses also implement specific get() methods which
- * return the appropriate class without need for a cast.
- * See SVN revision r743259
- * @return
- * @throws IOException
- */
- // public abstract XXXArchiveEntry getNextXXXEntry() throws IOException;
-
- /**
- * Reads a byte of data. This method will block until enough input is
- * available.
- *
- * Simply calls the {@link #read(byte[], int, int)} method.
- *
- * MUST be overridden if the {@link #read(byte[], int, int)} method
- * is not overridden; may be overridden otherwise.
- *
- * @return the byte read, or -1 if end of input is reached
- * @throws IOException
- * if an I/O error has occurred
- */
- @Override
- public int read() throws IOException {
- final int num = read(single, 0, 1);
- return num == -1 ? -1 : single[0] & BYTE_MASK;
- }
-
- /**
- * Increments the counter of already read bytes.
- * Doesn't increment if the EOF has been hit (read == -1)
- *
- * @param read the number of bytes read
- */
- protected void count(final int read) {
- count((long) read);
- }
-
- /**
- * Increments the counter of already read bytes.
- * Doesn't increment if the EOF has been hit (read == -1)
- *
- * @param read the number of bytes read
- * @since 1.1
- */
- protected void count(final long read) {
- if (read != -1) {
- bytesRead = bytesRead + read;
- }
- }
-
- /**
- * Decrements the counter of already read bytes.
- *
- * @param pushedBack the number of bytes pushed back.
- * @since 1.1
- */
- protected void pushedBackBytes(final long pushedBack) {
- bytesRead -= pushedBack;
- }
-
- /**
- * Returns the current number of bytes read from this stream.
- * @return the number of read bytes
- * @deprecated this method may yield wrong results for large
- * archives, use #getBytesRead instead
- */
- @Deprecated
- public int getCount() {
- return (int) bytesRead;
- }
-
- /**
- * Returns the current number of bytes read from this stream.
- * @return the number of read bytes
- * @since 1.1
- */
- public long getBytesRead() {
- return bytesRead;
- }
-
- /**
- * Whether this stream is able to read the given entry.
- *
- * <p>Some archive formats support variants or details that are not supported (yet).</p>
- *
- * @param archiveEntry
- * the entry to test
- * @return This implementation always returns true.
- *
- * @since 1.1
- */
- public boolean canReadEntryData(final ArchiveEntry archiveEntry) {
- return true;
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/ArchiveOutputStream.java b/src/org/apache/commons/compress/archivers/ArchiveOutputStream.java
deleted file mode 100644
index 4377b6dc449..00000000000
--- a/src/org/apache/commons/compress/archivers/ArchiveOutputStream.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * Archive output stream implementations are expected to override the
- * {@link #write(byte[], int, int)} method to improve performance.
- * They should also override {@link #close()} to ensure that any necessary
- * trailers are added.
- *
- * <p>The normal sequence of calls when working with ArchiveOutputStreams is:</p>
- * <ul>
- *   <li>Create ArchiveOutputStream object,</li>
- *   <li>optionally write SFX header (Zip only),</li>
- *   <li>repeat as needed:
- *     <ul>
- *       <li>{@link #putArchiveEntry(ArchiveEntry)} (writes entry header),
- *       <li>{@link #write(byte[])} (writes entry data, as often as needed),
- *       <li>{@link #closeArchiveEntry()} (closes entry),
- *     </ul>
- *   </li>
- *   <li>{@link #finish()} (ends the addition of entries),</li>
- *   <li>optionally write additional data, provided format supports it,</li>
- *   <li>{@link #close()}.</li>
- * </ul>
- */
-public abstract class ArchiveOutputStream extends OutputStream {
-
- /** Temporary buffer used for the {@link #write(int)} method */
- private final byte[] oneByte = new byte[1];
- static final int BYTE_MASK = 0xFF;
-
- /** holds the number of bytes written to this stream */
- private long bytesWritten = 0;
- // Methods specific to ArchiveOutputStream
-
- /**
- * Writes the headers for an archive entry to the output stream.
- * The caller must then write the content to the stream and call
- * {@link #closeArchiveEntry()} to complete the process.
- *
- * @param entry describes the entry
- * @throws IOException if an I/O error occurs
- */
- public abstract void putArchiveEntry(ArchiveEntry entry) throws IOException;
-
- /**
- * Closes the archive entry, writing any trailer information that may
- * be required.
- * @throws IOException if an I/O error occurs
- */
- public abstract void closeArchiveEntry() throws IOException;
-
- /**
- * Finishes the addition of entries to this stream, without closing it.
- * Additional data can be written, if the format supports it.
- *
- * @throws IOException if the user forgets to close the entry.
- */
- public abstract void finish() throws IOException;
-
- /**
- * Create an archive entry using the inputFile and entryName provided.
- *
- * @param inputFile the file to create the entry from
- * @param entryName name to use for the entry
- * @return the ArchiveEntry set up with details from the file
- *
- * @throws IOException if an I/O error occurs
- */
- public abstract ArchiveEntry createArchiveEntry(File inputFile, String entryName) throws IOException;
-
- // Generic implementations of OutputStream methods that may be useful to sub-classes
-
- /**
- * Writes a byte to the current archive entry.
- *
- * <p>MUST be overridden if the {@link #write(byte[], int, int)} method
- * is not overridden; may be overridden otherwise.
- *
- * @param b The byte to be written.
- * @throws IOException on error
- */
- @Override
- public void write(final int b) throws IOException {
- oneByte[0] = (byte) (b & BYTE_MASK);
- write(oneByte, 0, 1);
- }
-
- /**
- * Increments the counter of already written bytes.
- * Doesn't increment if EOF has been hit ({@code written == -1}).
- *
- * @param written the number of bytes written
- */
- protected void count(final int written) {
- count((long) written);
- }
-
- /**
- * Increments the counter of already written bytes.
- * Doesn't increment if EOF has been hit ({@code written == -1}).
- *
- * @param written the number of bytes written
- * @since 1.1
- */
- protected void count(final long written) {
- if (written != -1) {
- bytesWritten = bytesWritten + written;
- }
- }
-
- /**
- * Returns the current number of bytes written to this stream.
- * @return the number of written bytes
- * @deprecated this method may yield wrong results for large
- * archives, use #getBytesWritten instead
- */
- @Deprecated
- public int getCount() {
- return (int) bytesWritten;
- }
-
- /**
- * Returns the current number of bytes written to this stream.
- * @return the number of written bytes
- * @since 1.1
- */
- public long getBytesWritten() {
- return bytesWritten;
- }
-
- /**
- * Whether this stream is able to write the given entry.
- *
- * <p>Some archive formats support variants or details that are
- * not supported (yet).
- *
- * @param archiveEntry
- * the entry to test
- * @return This implementation always returns true.
- * @since 1.1
- */
- public boolean canWriteEntryData(final ArchiveEntry archiveEntry) {
- return true;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/ArchiveStreamFactory.java b/src/org/apache/commons/compress/archivers/ArchiveStreamFactory.java
deleted file mode 100644
index 3cd8ba76355..00000000000
--- a/src/org/apache/commons/compress/archivers/ArchiveStreamFactory.java
+++ /dev/null
@@ -1,592 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Locale;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.commons.compress.archivers.ar.ArArchiveInputStream;
-import org.apache.commons.compress.archivers.ar.ArArchiveOutputStream;
-import org.apache.commons.compress.archivers.arj.ArjArchiveInputStream;
-import org.apache.commons.compress.archivers.cpio.CpioArchiveInputStream;
-import org.apache.commons.compress.archivers.cpio.CpioArchiveOutputStream;
-import org.apache.commons.compress.archivers.dump.DumpArchiveInputStream;
-import org.apache.commons.compress.archivers.jar.JarArchiveInputStream;
-import org.apache.commons.compress.archivers.jar.JarArchiveOutputStream;
-import org.apache.commons.compress.archivers.sevenz.SevenZFile;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
-import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.Lists;
-import org.apache.commons.compress.utils.ServiceLoaderIterator;
-import org.apache.commons.compress.utils.Sets;
-
-/**
- * Factory to create Archive[In|Out]putStreams from names or the first bytes of
- * the InputStream. In order to add other implementations, you should extend
- * ArchiveStreamFactory and override the appropriate methods (and call their
- * implementation from super of course).
- *
- * Decompressing a ZIP-File:
- *
- * <pre>
- * final InputStream is = Files.newInputStream(input.toPath());
- * ArchiveInputStream in = new ArchiveStreamFactory().createArchiveInputStream(ArchiveStreamFactory.ZIP, is);
- * ZipArchiveEntry entry = (ZipArchiveEntry)in.getNextEntry();
- * OutputStream out = Files.newOutputStream(dir.toPath().resolve(entry.getName()));
- * IOUtils.copy(in, out);
- * out.close();
- * in.close();
- * </pre>
- *
- * @Immutable provided that the deprecated method setEntryEncoding is not used.
- * @ThreadSafe even if the deprecated method setEntryEncoding is used
- */
-public class ArchiveStreamFactory implements ArchiveStreamProvider {
-
- private static final int TAR_HEADER_SIZE = 512;
-
- private static final int DUMP_SIGNATURE_SIZE = 32;
-
- private static final int SIGNATURE_SIZE = 12;
-
- private static final ArchiveStreamFactory SINGLETON = new ArchiveStreamFactory();
-
- /**
- * Constant (value {@value}) used to identify the AR archive format.
- * @since 1.1
- */
- public static final String AR = "ar";
-
- /**
- * Constant (value {@value}) used to identify the ARJ archive format.
- * Not supported as an output stream type.
- * @since 1.6
- */
- public static final String ARJ = "arj";
-
- /**
- * Constant (value {@value}) used to identify the CPIO archive format.
- * @since 1.1
- */
- public static final String CPIO = "cpio";
-
- /**
- * Constant (value {@value}) used to identify the Unix DUMP archive format.
- * Not supported as an output stream type.
- * @since 1.3
- */
- public static final String DUMP = "dump";
-
- /**
- * Constant (value {@value}) used to identify the JAR archive format.
- * @since 1.1
- */
- public static final String JAR = "jar";
-
- /**
- * Constant used to identify the TAR archive format.
- * @since 1.1
- */
- public static final String TAR = "tar";
-
- /**
- * Constant (value {@value}) used to identify the ZIP archive format.
- * @since 1.1
- */
- public static final String ZIP = "zip";
-
- /**
- * Constant (value {@value}) used to identify the 7z archive format.
- * @since 1.8
- */
- public static final String SEVEN_Z = "7z";
-
- /**
- * Entry encoding, null for the platform default.
- */
- private final String encoding;
-
- /**
- * Entry encoding, null for the default.
- */
- private volatile String entryEncoding;
-
- private SortedMap<String, ArchiveStreamProvider> archiveInputStreamProviders;
-
- private SortedMap<String, ArchiveStreamProvider> archiveOutputStreamProviders;
-
- private static ArrayList<ArchiveStreamProvider> findArchiveStreamProviders() {
- return Lists.newArrayList(serviceLoaderIterator());
- }
-
- static void putAll(Set<String> names, ArchiveStreamProvider provider,
- TreeMap<String, ArchiveStreamProvider> map) {
- for (String name : names) {
- map.put(toKey(name), provider);
- }
- }
-
- private static Iterator<ArchiveStreamProvider> serviceLoaderIterator() {
- return new ServiceLoaderIterator<>(ArchiveStreamProvider.class);
- }
-
- private static String toKey(final String name) {
- return name.toUpperCase(Locale.ROOT);
- }
-
- /**
- * Constructs a new sorted map from input stream provider names to provider
- * objects.
- *
- *
- * The map returned by this method will have one entry for each provider for
- * which support is available in the current Java virtual machine. If two or
- * more supported provider have the same name then the resulting map will
- * contain just one of them; which one it will contain is not specified.
- *
- *
- *
- * The invocation of this method, and the subsequent use of the resulting
- * map, may cause time-consuming disk or network I/O operations to occur.
- * This method is provided for applications that need to enumerate all of
- * the available providers, for example to allow user provider selection.
- *
- *
- *
- * This method may return different results at different times if new
- * providers are dynamically made available to the current Java virtual
- * machine.
- *
- *
- * @return An immutable, map from names to provider objects
- * @since 1.13
- */
- public static SortedMap<String, ArchiveStreamProvider> findAvailableArchiveInputStreamProviders() {
- return AccessController.doPrivileged(new PrivilegedAction<SortedMap<String, ArchiveStreamProvider>>() {
- @Override
- public SortedMap<String, ArchiveStreamProvider> run() {
- TreeMap<String, ArchiveStreamProvider> map = new TreeMap<>();
- putAll(SINGLETON.getInputStreamArchiveNames(), SINGLETON, map);
- for (ArchiveStreamProvider provider : findArchiveStreamProviders()) {
- putAll(provider.getInputStreamArchiveNames(), provider, map);
- }
- return map;
- }
- });
- }
-
- /**
- * Constructs a new sorted map from output stream provider names to provider
- * objects.
- *
- *
- * The map returned by this method will have one entry for each provider for
- * which support is available in the current Java virtual machine. If two or
- * more supported provider have the same name then the resulting map will
- * contain just one of them; which one it will contain is not specified.
- *
- *
- *
- * The invocation of this method, and the subsequent use of the resulting
- * map, may cause time-consuming disk or network I/O operations to occur.
- * This method is provided for applications that need to enumerate all of
- * the available providers, for example to allow user provider selection.
- *
- *
- *
- * This method may return different results at different times if new
- * providers are dynamically made available to the current Java virtual
- * machine.
- *
- *
- * @return An immutable, map from names to provider objects
- * @since 1.13
- */
- public static SortedMap<String, ArchiveStreamProvider> findAvailableArchiveOutputStreamProviders() {
- return AccessController.doPrivileged(new PrivilegedAction<SortedMap<String, ArchiveStreamProvider>>() {
- @Override
- public SortedMap<String, ArchiveStreamProvider> run() {
- TreeMap<String, ArchiveStreamProvider> map = new TreeMap<>();
- putAll(SINGLETON.getOutputStreamArchiveNames(), SINGLETON, map);
- for (ArchiveStreamProvider provider : findArchiveStreamProviders()) {
- putAll(provider.getOutputStreamArchiveNames(), provider, map);
- }
- return map;
- }
- });
- }
-
- /**
- * Create an instance using the platform default encoding.
- */
- public ArchiveStreamFactory() {
- this(null);
- }
-
- /**
- * Create an instance using the specified encoding.
- *
- * @param encoding the encoding to be used.
- *
- * @since 1.10
- */
- public ArchiveStreamFactory(final String encoding) {
- super();
- this.encoding = encoding;
- // Also set the original field so can continue to use it.
- this.entryEncoding = encoding;
- }
-
- /**
- * Returns the encoding to use for arj, jar, zip, dump, cpio and tar
- * files, or null for the archiver default.
- *
- * @return entry encoding, or null for the archiver default
- * @since 1.5
- */
- public String getEntryEncoding() {
- return entryEncoding;
- }
-
- /**
- * Sets the encoding to use for arj, jar, zip, dump, cpio and tar files. Use null for the archiver default.
- *
- * @param entryEncoding the entry encoding, null uses the archiver default.
- * @since 1.5
- * @deprecated 1.10 use {@link #ArchiveStreamFactory(String)} to specify the encoding
- * @throws IllegalStateException if the constructor {@link #ArchiveStreamFactory(String)}
- * was used to specify the factory encoding.
- */
- @Deprecated
- public void setEntryEncoding(final String entryEncoding) {
- // Note: this does not detect new ArchiveStreamFactory(null) but that does not set the encoding anyway
- if (encoding != null) {
- throw new IllegalStateException("Cannot overide encoding set by the constructor");
- }
- this.entryEncoding = entryEncoding;
- }
-
- /**
- * Creates an archive input stream from an archiver name and an input stream.
- *
- * @param archiverName the archive name,
- * i.e. {@value #AR}, {@value #ARJ}, {@value #ZIP}, {@value #TAR}, {@value #JAR}, {@value #CPIO}, {@value #DUMP} or {@value #SEVEN_Z}
- * @param in the input stream
- * @return the archive input stream
- * @throws ArchiveException if the archiver name is not known
- * @throws StreamingNotSupportedException if the format cannot be
- * read from a stream
- * @throws IllegalArgumentException if the archiver name or stream is null
- */
- public ArchiveInputStream createArchiveInputStream(final String archiverName, final InputStream in)
- throws ArchiveException {
- return createArchiveInputStream(archiverName, in, entryEncoding);
- }
-
- @Override
- public ArchiveInputStream createArchiveInputStream(final String archiverName, final InputStream in,
- final String actualEncoding) throws ArchiveException {
-
- if (archiverName == null) {
- throw new IllegalArgumentException("Archivername must not be null.");
- }
-
- if (in == null) {
- throw new IllegalArgumentException("InputStream must not be null.");
- }
-
- if (AR.equalsIgnoreCase(archiverName)) {
- return new ArArchiveInputStream(in);
- }
- if (ARJ.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new ArjArchiveInputStream(in, actualEncoding);
- }
- return new ArjArchiveInputStream(in);
- }
- if (ZIP.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new ZipArchiveInputStream(in, actualEncoding);
- }
- return new ZipArchiveInputStream(in);
- }
- if (TAR.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new TarArchiveInputStream(in, actualEncoding);
- }
- return new TarArchiveInputStream(in);
- }
- if (JAR.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new JarArchiveInputStream(in, actualEncoding);
- }
- return new JarArchiveInputStream(in);
- }
- if (CPIO.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new CpioArchiveInputStream(in, actualEncoding);
- }
- return new CpioArchiveInputStream(in);
- }
- if (DUMP.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new DumpArchiveInputStream(in, actualEncoding);
- }
- return new DumpArchiveInputStream(in);
- }
- if (SEVEN_Z.equalsIgnoreCase(archiverName)) {
- throw new StreamingNotSupportedException(SEVEN_Z);
- }
-
- final ArchiveStreamProvider archiveStreamProvider = getArchiveInputStreamProviders().get(toKey(archiverName));
- if (archiveStreamProvider != null) {
- return archiveStreamProvider.createArchiveInputStream(archiverName, in, actualEncoding);
- }
-
- throw new ArchiveException("Archiver: " + archiverName + " not found.");
- }
-
- /**
- * Creates an archive output stream from an archiver name and an output stream.
- *
- * @param archiverName the archive name,
- * i.e. {@value #AR}, {@value #ZIP}, {@value #TAR}, {@value #JAR} or {@value #CPIO}
- * @param out the output stream
- * @return the archive output stream
- * @throws ArchiveException if the archiver name is not known
- * @throws StreamingNotSupportedException if the format cannot be
- * written to a stream
- * @throws IllegalArgumentException if the archiver name or stream is null
- */
- public ArchiveOutputStream createArchiveOutputStream(final String archiverName, final OutputStream out)
- throws ArchiveException {
- return createArchiveOutputStream(archiverName, out, entryEncoding);
- }
-
- @Override
- public ArchiveOutputStream createArchiveOutputStream(
- final String archiverName, final OutputStream out, final String actualEncoding)
- throws ArchiveException {
- if (archiverName == null) {
- throw new IllegalArgumentException("Archivername must not be null.");
- }
- if (out == null) {
- throw new IllegalArgumentException("OutputStream must not be null.");
- }
-
- if (AR.equalsIgnoreCase(archiverName)) {
- return new ArArchiveOutputStream(out);
- }
- if (ZIP.equalsIgnoreCase(archiverName)) {
- final ZipArchiveOutputStream zip = new ZipArchiveOutputStream(out);
- if (actualEncoding != null) {
- zip.setEncoding(actualEncoding);
- }
- return zip;
- }
- if (TAR.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new TarArchiveOutputStream(out, actualEncoding);
- }
- return new TarArchiveOutputStream(out);
- }
- if (JAR.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new JarArchiveOutputStream(out, actualEncoding);
- }
- return new JarArchiveOutputStream(out);
- }
- if (CPIO.equalsIgnoreCase(archiverName)) {
- if (actualEncoding != null) {
- return new CpioArchiveOutputStream(out, actualEncoding);
- }
- return new CpioArchiveOutputStream(out);
- }
- if (SEVEN_Z.equalsIgnoreCase(archiverName)) {
- throw new StreamingNotSupportedException(SEVEN_Z);
- }
-
- final ArchiveStreamProvider archiveStreamProvider = getArchiveOutputStreamProviders().get(toKey(archiverName));
- if (archiveStreamProvider != null) {
- return archiveStreamProvider.createArchiveOutputStream(archiverName, out, actualEncoding);
- }
-
- throw new ArchiveException("Archiver: " + archiverName + " not found.");
- }
-
- /**
- * Create an archive input stream from an input stream, autodetecting
- * the archive type from the first few bytes of the stream. The InputStream
- * must support marks, like BufferedInputStream.
- *
- * @param in the input stream
- * @return the archive input stream
- * @throws ArchiveException if the archiver name is not known
- * @throws StreamingNotSupportedException if the format cannot be
- * read from a stream
- * @throws IllegalArgumentException if the stream is null or does not support mark
- */
- public ArchiveInputStream createArchiveInputStream(final InputStream in)
- throws ArchiveException {
- return createArchiveInputStream(detect(in), in);
- }
-
- /**
- * Try to determine the type of Archiver
- * @param in input stream
- * @return type of archiver if found
- * @throws ArchiveException if an archiver cannot be detected in the stream
- * @since 1.14
- */
- public static String detect(InputStream in) throws ArchiveException {
- if (in == null) {
- throw new IllegalArgumentException("Stream must not be null.");
- }
-
- if (!in.markSupported()) {
- throw new IllegalArgumentException("Mark is not supported.");
- }
-
- final byte[] signature = new byte[SIGNATURE_SIZE];
- in.mark(signature.length);
- int signatureLength = -1;
- try {
- signatureLength = IOUtils.readFully(in, signature);
- in.reset();
- } catch (IOException e) {
- throw new ArchiveException("IOException while reading signature.", e);
- }
-
- if (ZipArchiveInputStream.matches(signature, signatureLength)) {
- return ZIP;
- } else if (JarArchiveInputStream.matches(signature, signatureLength)) {
- return JAR;
- } else if (ArArchiveInputStream.matches(signature, signatureLength)) {
- return AR;
- } else if (CpioArchiveInputStream.matches(signature, signatureLength)) {
- return CPIO;
- } else if (ArjArchiveInputStream.matches(signature, signatureLength)) {
- return ARJ;
- } else if (SevenZFile.matches(signature, signatureLength)) {
- return SEVEN_Z;
- }
-
- // Dump needs a bigger buffer to check the signature;
- final byte[] dumpsig = new byte[DUMP_SIGNATURE_SIZE];
- in.mark(dumpsig.length);
- try {
- signatureLength = IOUtils.readFully(in, dumpsig);
- in.reset();
- } catch (IOException e) {
- throw new ArchiveException("IOException while reading dump signature", e);
- }
- if (DumpArchiveInputStream.matches(dumpsig, signatureLength)) {
- return DUMP;
- }
-
- // Tar needs an even bigger buffer to check the signature; read the first block
- final byte[] tarHeader = new byte[TAR_HEADER_SIZE];
- in.mark(tarHeader.length);
- try {
- signatureLength = IOUtils.readFully(in, tarHeader);
- in.reset();
- } catch (IOException e) {
- throw new ArchiveException("IOException while reading tar signature", e);
- }
- if (TarArchiveInputStream.matches(tarHeader, signatureLength)) {
- return TAR;
- }
-
- // COMPRESS-117 - improve auto-recognition
- if (signatureLength >= TAR_HEADER_SIZE) {
- TarArchiveInputStream tais = null;
- try {
- tais = new TarArchiveInputStream(new ByteArrayInputStream(tarHeader));
- // COMPRESS-191 - verify the header checksum
- if (tais.getNextTarEntry().isCheckSumOK()) {
- return TAR;
- }
- } catch (final Exception e) { // NOPMD // NOSONAR
- // can generate IllegalArgumentException as well
- // as IOException
- // autodetection, simply not a TAR
- // ignored
- } finally {
- IOUtils.closeQuietly(tais);
- }
- }
- throw new ArchiveException("No Archiver found for the stream signature");
- }
-
- public SortedMap<String, ArchiveStreamProvider> getArchiveInputStreamProviders() {
- if (archiveInputStreamProviders == null) {
- archiveInputStreamProviders = Collections
- .unmodifiableSortedMap(findAvailableArchiveInputStreamProviders());
- }
- return archiveInputStreamProviders;
- }
-
- public SortedMap<String, ArchiveStreamProvider> getArchiveOutputStreamProviders() {
- if (archiveOutputStreamProviders == null) {
- archiveOutputStreamProviders = Collections
- .unmodifiableSortedMap(findAvailableArchiveOutputStreamProviders());
- }
- return archiveOutputStreamProviders;
- }
-
- @Override
- public Set<String> getInputStreamArchiveNames() {
- return Sets.newHashSet(AR, ARJ, ZIP, TAR, JAR, CPIO, DUMP, SEVEN_Z);
- }
-
- @Override
- public Set<String> getOutputStreamArchiveNames() {
- return Sets.newHashSet(AR, ZIP, TAR, JAR, CPIO, SEVEN_Z);
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/ArchiveStreamProvider.java b/src/org/apache/commons/compress/archivers/ArchiveStreamProvider.java
deleted file mode 100644
index eb1862c9dea..00000000000
--- a/src/org/apache/commons/compress/archivers/ArchiveStreamProvider.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.archivers;
-
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Set;
-
-/**
- * Creates Archive {@link ArchiveInputStream}s and {@link ArchiveOutputStream}s.
- *
- * @since 1.13
- */
-public interface ArchiveStreamProvider {
-
- /**
- * Creates an archive input stream from an archiver name and an input
- * stream.
- *
- * @param name
- * the archive name, i.e.
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#AR},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#ARJ},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#ZIP},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#TAR},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#JAR},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#CPIO},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#DUMP}
- * or
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#SEVEN_Z}
- * @param in
- * the input stream
- * @param encoding
- * encoding name or null for the default
- * @return the archive input stream
- * @throws ArchiveException
- * if the archiver name is not known
- * @throws StreamingNotSupportedException
- * if the format cannot be read from a stream
- * @throws IllegalArgumentException
- * if the archiver name or stream is null
- */
- ArchiveInputStream createArchiveInputStream(final String name, final InputStream in, final String encoding)
- throws ArchiveException;
-
- /**
- * Creates an archive output stream from an archiver name and an output
- * stream.
- *
- * @param name
- * the archive name, i.e.
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#AR},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#ZIP},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#TAR},
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#JAR}
- * or
- * {@value org.apache.commons.compress.archivers.ArchiveStreamFactory#CPIO}
- * @param out
- * the output stream
- * @param encoding
- * encoding name or null for the default
- * @return the archive output stream
- * @throws ArchiveException
- * if the archiver name is not known
- * @throws StreamingNotSupportedException
- * if the format cannot be written to a stream
- * @throws IllegalArgumentException
- * if the archiver name or stream is null
- */
- ArchiveOutputStream createArchiveOutputStream(final String name, final OutputStream out, final String encoding)
- throws ArchiveException;
-
- /**
- * Gets all the input stream archive names for this provider
- *
- * @return all the input archive names for this provider
- */
- Set<String> getInputStreamArchiveNames();
-
- /**
- * Gets all the output stream archive names for this provider
- *
- * @return all the output archive names for this provider
- */
- Set<String> getOutputStreamArchiveNames();
-
-}
diff --git a/src/org/apache/commons/compress/archivers/Archiver.java b/src/org/apache/commons/compress/archivers/Archiver.java
deleted file mode 100644
index 4e82640a84a..00000000000
--- a/src/org/apache/commons/compress/archivers/Archiver.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.FileFilter;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.Channels;
-import java.nio.channels.FileChannel;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.StandardOpenOption;
-
-import org.apache.commons.compress.archivers.sevenz.SevenZOutputFile;
-import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Provides a high level API for creating archives.
- * @since 1.17
- */
-public class Archiver {
-
- private static final FileFilter ACCEPT_ALL = new FileFilter() {
- @Override
- public boolean accept(File f) {
- return true;
- }
- };
-
- private interface ArchiveEntryCreator {
- ArchiveEntry create(File f, String entryName) throws IOException;
- }
-
- private interface ArchiveEntryConsumer {
- void accept(File source, ArchiveEntry entry) throws IOException;
- }
-
- private interface Finisher {
- void finish() throws IOException;
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the file to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, File target, File directory) throws IOException, ArchiveException {
- create(format, target, directory, ACCEPT_ALL);
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory} that are accepted by {@code filter}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the file to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @param filter selects the files and directories to include inside the archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, File target, File directory, FileFilter filter)
- throws IOException, ArchiveException {
- if (prefersSeekableByteChannel(format)) {
- try (SeekableByteChannel c = FileChannel.open(target.toPath(), StandardOpenOption.WRITE,
- StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
- create(format, c, directory, filter);
- }
- return;
- }
- try (OutputStream o = new FileOutputStream(target)) {
- create(format, o, directory, filter);
- }
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the stream to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, OutputStream target, File directory) throws IOException, ArchiveException {
- create(format, target, directory, ACCEPT_ALL);
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory} that are accepted by {@code filter}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the stream to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @param filter selects the files and directories to include inside the archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, OutputStream target, File directory, FileFilter filter)
- throws IOException, ArchiveException {
- create(new ArchiveStreamFactory().createArchiveOutputStream(format, target), directory, filter);
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the channel to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, SeekableByteChannel target, File directory)
- throws IOException, ArchiveException {
- create(format, target, directory, ACCEPT_ALL);
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory} that are accepted by {@code filter}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the channel to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @param filter selects the files and directories to include inside the archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, SeekableByteChannel target, File directory, FileFilter filter)
- throws IOException, ArchiveException {
- if (!prefersSeekableByteChannel(format)) {
- create(format, Channels.newOutputStream(target), directory, filter);
- } else if (ArchiveStreamFactory.ZIP.equalsIgnoreCase(format)) {
- create(new ZipArchiveOutputStream(target), directory, filter);
- } else if (ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format)) {
- create(new SevenZOutputFile(target), directory, filter);
- } else {
- throw new ArchiveException("don't know how to handle format " + format);
- }
- }
-
- /**
- * Creates an archive {@code target} by recursively including all
- * files and directories in {@code directory}.
- *
- * @param target the stream to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(ArchiveOutputStream target, File directory) throws IOException, ArchiveException {
- create(target, directory, ACCEPT_ALL);
- }
-
- /**
- * Creates an archive {@code target} by recursively including all
- * files and directories in {@code directory} that are accepted by
- * {@code filter}.
- *
- * @param target the stream to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @param filter selects the files and directories to include inside the archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(final ArchiveOutputStream target, File directory, FileFilter filter)
- throws IOException, ArchiveException {
- create(directory, filter, new ArchiveEntryCreator() {
- public ArchiveEntry create(File f, String entryName) throws IOException {
- return target.createArchiveEntry(f, entryName);
- }
- }, new ArchiveEntryConsumer() {
- public void accept(File source, ArchiveEntry e) throws IOException {
- target.putArchiveEntry(e);
- if (!e.isDirectory()) {
- try (InputStream in = new BufferedInputStream(new FileInputStream(source))) {
- IOUtils.copy(in, target);
- }
- }
- target.closeArchiveEntry();
- }
- }, new Finisher() {
- public void finish() throws IOException {
- target.finish();
- }
- });
- }
-
- /**
- * Creates an archive {@code target} by recursively including all
- * files and directories in {@code directory}.
- *
- * @param target the file to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- */
- public void create(final SevenZOutputFile target, File directory) throws IOException {
- create(target, directory, ACCEPT_ALL);
- }
-
- /**
- * Creates an archive {@code target} by recursively including all
- * files and directories in {@code directory} that are accepted by
- * {@code filter}.
- *
- * @param target the file to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @param filter selects the files and directories to include inside the archive.
- * @throws IOException if an I/O error occurs
- */
- public void create(final SevenZOutputFile target, File directory, FileFilter filter) throws IOException {
- create(directory, filter, new ArchiveEntryCreator() {
- public ArchiveEntry create(File f, String entryName) throws IOException {
- return target.createArchiveEntry(f, entryName);
- }
- }, new ArchiveEntryConsumer() {
- public void accept(File source, ArchiveEntry e) throws IOException {
- target.putArchiveEntry(e);
- if (!e.isDirectory()) {
- final byte[] buffer = new byte[8024];
- int n = 0;
- long count = 0;
- try (InputStream in = new BufferedInputStream(new FileInputStream(source))) {
- while (-1 != (n = in.read(buffer))) {
- target.write(buffer, 0, n);
- count += n;
- }
- }
- }
- target.closeArchiveEntry();
- }
- }, new Finisher() {
- public void finish() throws IOException {
- target.finish();
- }
- });
- }
-
- private boolean prefersSeekableByteChannel(String format) {
- return ArchiveStreamFactory.ZIP.equalsIgnoreCase(format) || ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format);
- }
-
- private void create(File directory, FileFilter filter, ArchiveEntryCreator creator, ArchiveEntryConsumer consumer,
- Finisher finisher) throws IOException {
- create("", directory, filter, creator, consumer);
- finisher.finish();
- }
-
- private void create(String prefix, File directory, FileFilter filter, ArchiveEntryCreator creator, ArchiveEntryConsumer consumer)
- throws IOException {
- File[] children = directory.listFiles(filter);
- if (children == null) {
- return;
- }
- for (File f : children) {
- String entryName = prefix + f.getName() + (f.isDirectory() ? "/" : "");
- consumer.accept(f, creator.create(f, entryName));
- if (f.isDirectory()) {
- create(entryName, f, filter, creator, consumer);
- }
- }
- }
-}
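Context for reviewers: with this file removed, the same high-level archiving helper comes from the commons-compress jar that Ivy now resolves. A minimal usage sketch, assuming upstream's packaging of the class (org.apache.commons.compress.archivers.examples.Archiver in 1.17) and placeholder file names:

    import java.io.File;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.commons.compress.archivers.ArchiveException;
    import org.apache.commons.compress.archivers.ArchiveOutputStream;
    import org.apache.commons.compress.archivers.ArchiveStreamFactory;
    import org.apache.commons.compress.archivers.examples.Archiver; // assumed upstream location

    public class ArchiverUsageSketch {
        public static void main(String[] args) throws IOException, ArchiveException {
            // Recursively packs "input-dir" into a tar archive; Archiver.create
            // walks the tree, emits one entry per file and finishes the stream.
            try (OutputStream out = Files.newOutputStream(Paths.get("example.tar"));
                 ArchiveOutputStream target = new ArchiveStreamFactory()
                         .createArchiveOutputStream(ArchiveStreamFactory.TAR, out)) {
                new Archiver().create(target, new File("input-dir"));
            }
        }
    }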
diff --git a/src/org/apache/commons/compress/archivers/EntryStreamOffsets.java b/src/org/apache/commons/compress/archivers/EntryStreamOffsets.java
deleted file mode 100644
index a73d079e293..00000000000
--- a/src/org/apache/commons/compress/archivers/EntryStreamOffsets.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-
-/**
- * Provides information about ArchiveEntry stream offsets.
- */
-public interface EntryStreamOffsets {
-
- /** Special value indicating that the offset is unknown. */
- long OFFSET_UNKNOWN = -1;
-
- /**
- * Gets the offset of the data stream within the archive file.
- *
- * @return
- * the offset of the entry's data stream, or {@code OFFSET_UNKNOWN} if not known.
- */
- long getDataOffset();
-
- /**
- * Indicates whether the stream is contiguous, i.e. not split among
- * several archive parts, interspersed with control blocks, etc.
- *
- * @return
- * true if stream is contiguous, false otherwise.
- */
- boolean isStreamContiguous();
-}
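The interface above is easiest to see through ZipArchiveEntry, which implements it in upstream commons-compress; a short sketch (the archive name is a placeholder):

    import java.io.File;
    import java.io.IOException;
    import java.util.Enumeration;

    import org.apache.commons.compress.archivers.EntryStreamOffsets;
    import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
    import org.apache.commons.compress.archivers.zip.ZipFile;

    public class EntryOffsetsSketch {
        public static void main(String[] args) throws IOException {
            try (ZipFile zip = new ZipFile(new File("example.zip"))) {
                for (Enumeration<ZipArchiveEntry> en = zip.getEntries(); en.hasMoreElements();) {
                    EntryStreamOffsets offsets = en.nextElement();
                    if (offsets.isStreamContiguous()
                            && offsets.getDataOffset() != EntryStreamOffsets.OFFSET_UNKNOWN) {
                        // contiguous data with a known offset can be sliced straight
                        // out of the archive file without re-parsing entry headers
                        System.out.println("data starts at byte " + offsets.getDataOffset());
                    }
                }
            }
        }
    }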
diff --git a/src/org/apache/commons/compress/archivers/Expander.java b/src/org/apache/commons/compress/archivers/Expander.java
deleted file mode 100644
index 82b958ebac0..00000000000
--- a/src/org/apache/commons/compress/archivers/Expander.java
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.Channels;
-import java.nio.channels.FileChannel;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.StandardOpenOption;
-import java.util.Enumeration;
-
-import org.apache.commons.compress.archivers.sevenz.SevenZFile;
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipFile;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Provides a high level API for expanding archives.
- * @since 1.17
- */
-public class Expander {
- /**
- * Used to filter the entries to be extracted.
- */
- public interface ArchiveEntryFilter {
- /**
- * @param entry the entry to test
- * @return true if the entry shall be expanded
- */
- boolean accept(ArchiveEntry entry);
- }
-
- private static final ArchiveEntryFilter ACCEPT_ALL = new ArchiveEntryFilter() {
- @Override
- public boolean accept(ArchiveEntry e) {
- return true;
- }
- };
-
- private interface ArchiveEntrySupplier {
- ArchiveEntry getNextReadableEntry() throws IOException;
- }
-
- private interface EntryWriter {
- void writeEntryDataTo(ArchiveEntry entry, OutputStream out) throws IOException;
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- *
- * <p>Tries to auto-detect the archive's format.</p>
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(File archive, File targetDirectory) throws IOException, ArchiveException {
- expand(archive, targetDirectory, ACCEPT_ALL);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, File archive, File targetDirectory) throws IOException, ArchiveException {
- expand(format, archive, targetDirectory, ACCEPT_ALL);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}, using
- * only the entries accepted by the {@code filter}.
- *
- *
- * <p>Tries to auto-detect the archive's format.</p>
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param filter selects the entries to expand
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(File archive, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException, ArchiveException {
- String format = null;
- try (InputStream i = new BufferedInputStream(new FileInputStream(archive))) {
- format = new ArchiveStreamFactory().detect(i);
- }
- expand(format, archive, targetDirectory, filter);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}, using
- * only the entries accepted by the {@code filter}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param filter selects the entries to expand
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, File archive, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException, ArchiveException {
- if (prefersSeekableByteChannel(format)) {
- try (SeekableByteChannel c = FileChannel.open(archive.toPath(), StandardOpenOption.READ)) {
- expand(format, c, targetDirectory, filter);
- }
- return;
- }
- try (InputStream i = new BufferedInputStream(new FileInputStream(archive))) {
- expand(format, i, targetDirectory, filter);
- }
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- *
- * <p>Tries to auto-detect the archive's format.</p>
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(InputStream archive, File targetDirectory) throws IOException, ArchiveException {
- expand(archive, targetDirectory, ACCEPT_ALL);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, InputStream archive, File targetDirectory)
- throws IOException, ArchiveException {
- expand(format, archive, targetDirectory, ACCEPT_ALL);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}, using
- * only the entries accepted by the {@code filter}.
- *
- *
- * <p>Tries to auto-detect the archive's format.</p>
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param filter selects the entries to expand
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(InputStream archive, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException, ArchiveException {
- expand(new ArchiveStreamFactory().createArchiveInputStream(archive), targetDirectory, filter);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}, using
- * only the entries accepted by the {@code filter}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param filter selects the entries to expand
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, InputStream archive, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException, ArchiveException {
- expand(new ArchiveStreamFactory().createArchiveInputStream(format, archive), targetDirectory, filter);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, SeekableByteChannel archive, File targetDirectory)
- throws IOException, ArchiveException {
- expand(format, archive, targetDirectory, ACCEPT_ALL);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}, using
- * only the entries accepted by the {@code filter}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param filter selects the entries to expand
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, SeekableByteChannel archive, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException, ArchiveException {
- if (!prefersSeekableByteChannel(format)) {
- expand(format, Channels.newInputStream(archive), targetDirectory, filter);
- } else if (ArchiveStreamFactory.ZIP.equalsIgnoreCase(format)) {
- expand(new ZipFile(archive), targetDirectory, filter);
- } else if (ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format)) {
- expand(new SevenZFile(archive), targetDirectory, filter);
- } else {
- throw new ArchiveException("don't know how to handle format " + format);
- }
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(ArchiveInputStream archive, File targetDirectory)
- throws IOException, ArchiveException {
- expand(archive, targetDirectory, ACCEPT_ALL);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}, using
- * only the entries accepted by the {@code filter}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param filter selects the entries to expand
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(final ArchiveInputStream archive, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException, ArchiveException {
- expand(new ArchiveEntrySupplier() {
- @Override
- public ArchiveEntry getNextReadableEntry() throws IOException {
- ArchiveEntry next = archive.getNextEntry();
- while (next != null && !archive.canReadEntryData(next)) {
- next = archive.getNextEntry();
- }
- return next;
- }
- }, new EntryWriter() {
- @Override
- public void writeEntryDataTo(ArchiveEntry entry, OutputStream out) throws IOException {
- IOUtils.copy(archive, out);
- }
- }, targetDirectory, filter);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(ZipFile archive, File targetDirectory)
- throws IOException, ArchiveException {
- expand(archive, targetDirectory, ACCEPT_ALL);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}, using
- * only the entries accepted by the {@code filter}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param filter selects the entries to expand
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(final ZipFile archive, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException, ArchiveException {
- final Enumeration<ZipArchiveEntry> entries = archive.getEntries();
- expand(new ArchiveEntrySupplier() {
- @Override
- public ArchiveEntry getNextReadableEntry() throws IOException {
- ZipArchiveEntry next = entries.hasMoreElements() ? entries.nextElement() : null;
- while (next != null && !archive.canReadEntryData(next)) {
- next = entries.hasMoreElements() ? entries.nextElement() : null;
- }
- return next;
- }
- }, new EntryWriter() {
- @Override
- public void writeEntryDataTo(ArchiveEntry entry, OutputStream out) throws IOException {
- try (InputStream in = archive.getInputStream((ZipArchiveEntry) entry)) {
- IOUtils.copy(in, out);
- }
- }
- }, targetDirectory, filter);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(SevenZFile archive, File targetDirectory)
- throws IOException, ArchiveException {
- expand(archive, targetDirectory, ACCEPT_ALL);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}, using
- * only the entries accepted by the {@code filter}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param filter selects the entries to expand
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(final SevenZFile archive, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException, ArchiveException {
- expand(new ArchiveEntrySupplier() {
- @Override
- public ArchiveEntry getNextReadableEntry() throws IOException {
- return archive.getNextEntry();
- }
- }, new EntryWriter() {
- @Override
- public void writeEntryDataTo(ArchiveEntry entry, OutputStream out) throws IOException {
- final byte[] buffer = new byte[8024];
- int n = 0;
- while (-1 != (n = archive.read(buffer))) {
- out.write(buffer, 0, n);
- }
- }
- }, targetDirectory, filter);
- }
-
- private boolean prefersSeekableByteChannel(String format) {
- return ArchiveStreamFactory.ZIP.equalsIgnoreCase(format) || ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format);
- }
-
- private void expand(ArchiveEntrySupplier supplier, EntryWriter writer, File targetDirectory, ArchiveEntryFilter filter)
- throws IOException {
- String targetDirPath = targetDirectory.getCanonicalPath();
- if (!targetDirPath.endsWith(File.separator)) {
- targetDirPath += File.separator;
- }
- ArchiveEntry nextEntry = supplier.getNextReadableEntry();
- while (nextEntry != null) {
- if (!filter.accept(nextEntry)) {
- nextEntry = supplier.getNextReadableEntry();
- continue;
- }
- File f = new File(targetDirectory, nextEntry.getName());
- if (!f.getCanonicalPath().startsWith(targetDirPath)) {
- throw new IOException("expanding " + nextEntry.getName()
- + " would craete file outside of " + targetDirectory);
- }
- if (nextEntry.isDirectory()) {
- f.mkdirs();
- } else {
- f.getParentFile().mkdirs();
- try (OutputStream o = new FileOutputStream(f)) {
- writer.writeEntryDataTo(nextEntry, o);
- }
- }
- nextEntry = supplier.getNextReadableEntry();
- }
- }
-
-}
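Equivalent functionality remains available from the jar; a minimal sketch of the auto-detecting entry point, assuming upstream's org.apache.commons.compress.archivers.examples.Expander and placeholder paths:

    import java.io.File;
    import java.io.IOException;

    import org.apache.commons.compress.archivers.ArchiveException;
    import org.apache.commons.compress.archivers.examples.Expander; // assumed upstream location

    public class ExpanderUsageSketch {
        public static void main(String[] args) throws IOException, ArchiveException {
            // Detects the format from the file signature, then extracts every
            // entry below "out-dir", rejecting names that would escape it.
            new Expander().expand(new File("example.zip"), new File("out-dir"));
        }
    }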
diff --git a/src/org/apache/commons/compress/archivers/Lister.java b/src/org/apache/commons/compress/archivers/Lister.java
deleted file mode 100644
index 07a8e9c0756..00000000000
--- a/src/org/apache/commons/compress/archivers/Lister.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.archivers;
-
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Files;
-import org.apache.commons.compress.archivers.sevenz.SevenZFile;
-
-/**
- * Simple command line application that lists the contents of an archive.
- *
- *
- * <p>The name of the archive must be given as a command line argument.</p>
- *
- * <p>The optional second argument defines the archive type, in case the format is not recognized.</p>
- *
- * @since 1.1
- */
-public final class Lister {
- private static final ArchiveStreamFactory factory = new ArchiveStreamFactory();
-
- public static void main(final String[] args) throws Exception {
- if (args.length == 0) {
- usage();
- return;
- }
- System.out.println("Analysing " + args[0]);
- final File f = new File(args[0]);
- if (!f.isFile()) {
- System.err.println(f + " doesn't exist or is a directory");
- return;
- }
- String format = args.length > 1 ? args[1] : detectFormat(f);
- if (ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format)) {
- list7z(f);
- } else {
- listStream(f, args);
- }
- }
-
- private static void listStream(File f, String[] args) throws ArchiveException, IOException {
- try (final InputStream fis = new BufferedInputStream(Files.newInputStream(f.toPath()));
- final ArchiveInputStream ais = createArchiveInputStream(args, fis)) {
- System.out.println("Created " + ais.toString());
- ArchiveEntry ae;
- while ((ae = ais.getNextEntry()) != null) {
- System.out.println(ae.getName());
- }
- }
- }
-
- private static ArchiveInputStream createArchiveInputStream(final String[] args, final InputStream fis)
- throws ArchiveException {
- if (args.length > 1) {
- return factory.createArchiveInputStream(args[1], fis);
- }
- return factory.createArchiveInputStream(fis);
- }
-
- private static String detectFormat(File f) throws ArchiveException, IOException {
- try (final InputStream fis = new BufferedInputStream(Files.newInputStream(f.toPath()))) {
- return factory.detect(fis);
- }
- }
-
- private static void list7z(File f) throws ArchiveException, IOException {
- try (SevenZFile z = new SevenZFile(f)) {
- System.out.println("Created " + z.toString());
- ArchiveEntry ae;
- while ((ae = z.getNextEntry()) != null) {
- System.out.println(ae.getName());
- }
- }
- }
-
- private static void usage() {
- System.out.println("Parameters: archive-name [archive-type]");
- }
-
-}
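Lister stays available from the jar under the same package; invoking it programmatically looks like this (file names are placeholders; the second argument is any ArchiveStreamFactory format name):

    import org.apache.commons.compress.archivers.Lister;

    public class ListerUsageSketch {
        public static void main(String[] args) throws Exception {
            Lister.main(new String[] { "example.tar" });          // auto-detect the format
            Lister.main(new String[] { "ambiguous.bin", "tar" }); // force the archive type
        }
    }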
diff --git a/src/org/apache/commons/compress/archivers/StreamingNotSupportedException.java b/src/org/apache/commons/compress/archivers/StreamingNotSupportedException.java
deleted file mode 100644
index 9f12a7aff9c..00000000000
--- a/src/org/apache/commons/compress/archivers/StreamingNotSupportedException.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers;
-
-/**
- * Exception thrown by ArchiveStreamFactory if a format is requested/detected that doesn't support streaming.
- *
- * @since 1.8
- */
-public class StreamingNotSupportedException extends ArchiveException {
-
- private static final long serialVersionUID = 1L;
-
- private final String format;
-
- /**
- * Creates a new StreamingNotSupportedException.
- *
- * @param format the format that has been requested/detected.
- */
- public StreamingNotSupportedException(final String format) {
- super("The " + format + " doesn't support streaming.");
- this.format = format;
- }
-
- /**
- * Returns the format that has been requested/detected.
- *
- * @return the format that has been requested/detected.
- */
- public String getFormat() {
- return format;
- }
-
-}
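The typical way to hit this exception is asking the stream factory for a 7z reader, since 7z needs random access; a short sketch with a placeholder file name:

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.commons.compress.archivers.ArchiveStreamFactory;
    import org.apache.commons.compress.archivers.StreamingNotSupportedException;

    public class StreamingCheckSketch {
        public static void main(String[] args) throws Exception {
            try (InputStream in = Files.newInputStream(Paths.get("example.7z"))) {
                new ArchiveStreamFactory()
                        .createArchiveInputStream(ArchiveStreamFactory.SEVEN_Z, in);
            } catch (StreamingNotSupportedException ex) {
                // getFormat() reports which format refused to stream
                System.err.println(ex.getFormat() + " needs random access; use SevenZFile instead");
            }
        }
    }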
diff --git a/src/org/apache/commons/compress/archivers/ar/ArArchiveEntry.java b/src/org/apache/commons/compress/archivers/ar/ArArchiveEntry.java
deleted file mode 100644
index df9595ad249..00000000000
--- a/src/org/apache/commons/compress/archivers/ar/ArArchiveEntry.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.ar;
-
-import java.io.File;
-import java.util.Date;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * Represents an archive entry in the "ar" format.
- *
- * Each AR archive starts with "!<arch>" followed by a LF. After these 8 bytes
- * the archive entries are listed. The format of an entry header is as follows:
- *
- * <pre>
- * START BYTE   END BYTE   NAME                    FORMAT      LENGTH
- * 0            15         File name               ASCII       16
- * 16           27         Modification timestamp  Decimal     12
- * 28           33         Owner ID                Decimal     6
- * 34           39         Group ID                Decimal     6
- * 40           47         File mode               Octal       8
- * 48           57         File size (bytes)       Decimal     10
- * 58           59         File magic              \140\012    2
- * </pre>
- *
- * An ar entry header is thus 60 bytes long in total.
- *
- * Because the file name length is limited to 16 bytes, GNU and
- * BSD have their own variants of this format. Currently Commons
- * Compress can read but not write the GNU variant. It fully supports
- * the BSD variant.
- *
- * @see ar man page
- *
- * @Immutable
- */
-public class ArArchiveEntry implements ArchiveEntry {
-
- /** The header for each entry */
- public static final String HEADER = "!\n";
-
- /** The trailer for each entry */
- public static final String TRAILER = "`\012";
-
- /**
- * SVR4/GNU adds a trailing / to names; BSD does not.
- * They also vary in how names longer than 16 characters are represented.
- * (Not yet fully supported by this implementation)
- */
- private final String name;
- private final int userId;
- private final int groupId;
- private final int mode;
- private static final int DEFAULT_MODE = 33188; // = (octal) 0100644
- private final long lastModified;
- private final long length;
-
- /**
- * Create a new instance using a couple of default values.
- *
- *
- * <p>Sets userId and groupId to 0, the octal file mode to 644 and
- * the last modified time to the current time.</p>
- *
- * @param name name of the entry
- * @param length length of the entry in bytes
- */
- public ArArchiveEntry(final String name, final long length) {
- this(name, length, 0, 0, DEFAULT_MODE,
- System.currentTimeMillis() / 1000);
- }
-
- /**
- * Create a new instance.
- *
- * @param name name of the entry
- * @param length length of the entry in bytes
- * @param userId numeric user id
- * @param groupId numeric group id
- * @param mode file mode
- * @param lastModified last modified time in seconds since the epoch
- */
- public ArArchiveEntry(final String name, final long length, final int userId, final int groupId,
- final int mode, final long lastModified) {
- this.name = name;
- this.length = length;
- this.userId = userId;
- this.groupId = groupId;
- this.mode = mode;
- this.lastModified = lastModified;
- }
-
- /**
- * Create a new instance using the attributes of the given file
- * @param inputFile the file to create an entry from
- * @param entryName the name of the entry
- */
- public ArArchiveEntry(final File inputFile, final String entryName) {
- // TODO sort out mode
- this(entryName, inputFile.isFile() ? inputFile.length() : 0,
- 0, 0, DEFAULT_MODE, inputFile.lastModified() / 1000);
- }
-
- @Override
- public long getSize() {
- return this.getLength();
- }
-
- @Override
- public String getName() {
- return name;
- }
-
- public int getUserId() {
- return userId;
- }
-
- public int getGroupId() {
- return groupId;
- }
-
- public int getMode() {
- return mode;
- }
-
- /**
- * Last modified time in seconds since the epoch.
- * @return the last modified date
- */
- public long getLastModified() {
- return lastModified;
- }
-
- @Override
- public Date getLastModifiedDate() {
- return new Date(1000 * getLastModified());
- }
-
- public long getLength() {
- return length;
- }
-
- @Override
- public boolean isDirectory() {
- return false;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + (name == null ? 0 : name.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
- final ArArchiveEntry other = (ArArchiveEntry) obj;
- if (name == null) {
- return other.name == null;
- } else {
- return name.equals(other.name);
- }
- }
-}
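To make the header layout concrete, a sketch that builds an entry whose constructor arguments map one-to-one onto the 60-byte header fields documented above (the values are illustrative):

    import org.apache.commons.compress.archivers.ar.ArArchiveEntry;

    public class ArEntrySketch {
        public static void main(String[] args) {
            // name (16 bytes), size (10), uid (6), gid (6), octal mode (8),
            // last-modified seconds since the epoch (12)
            ArArchiveEntry entry = new ArArchiveEntry("hello.txt", 11L, 0, 0,
                    0100644, System.currentTimeMillis() / 1000);
            System.out.println(entry.getName() + ", " + entry.getSize() + " bytes");
        }
    }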
diff --git a/src/org/apache/commons/compress/archivers/ar/ArArchiveInputStream.java b/src/org/apache/commons/compress/archivers/ar/ArArchiveInputStream.java
deleted file mode 100644
index ddd122ede19..00000000000
--- a/src/org/apache/commons/compress/archivers/ar/ArArchiveInputStream.java
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.ar;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.utils.ArchiveUtils;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Implements the "ar" archive format as an input stream.
- *
- * @NotThreadSafe
- *
- */
-public class ArArchiveInputStream extends ArchiveInputStream {
-
- private final InputStream input;
- private long offset = 0;
- private boolean closed;
-
- /*
- * If getNextEntry has been called, the entry metadata is stored in
- * currentEntry.
- */
- private ArArchiveEntry currentEntry = null;
-
- // Storage area for extra long names (GNU ar)
- private byte[] namebuffer = null;
-
- /*
- * The offset where the current entry started. -1 if no entry has been
- * read yet.
- */
- private long entryOffset = -1;
-
- // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
- private final byte[] nameBuf = new byte[16];
- private final byte[] lastModifiedBuf = new byte[12];
- private final byte[] idBuf = new byte[6];
- private final byte[] fileModeBuf = new byte[8];
- private final byte[] lengthBuf = new byte[10];
-
- /**
- * Constructs an Ar input stream with the referenced stream
- *
- * @param pInput
- * the ar input stream
- */
- public ArArchiveInputStream(final InputStream pInput) {
- input = pInput;
- closed = false;
- }
-
- /**
- * Returns the next AR entry in this stream.
- *
- * @return the next AR entry.
- * @throws IOException
- * if the entry could not be read
- */
- public ArArchiveEntry getNextArEntry() throws IOException {
- if (currentEntry != null) {
- final long entryEnd = entryOffset + currentEntry.getLength();
- IOUtils.skip(this, entryEnd - offset);
- currentEntry = null;
- }
-
- if (offset == 0) {
- final byte[] expected = ArchiveUtils.toAsciiBytes(ArArchiveEntry.HEADER);
- final byte[] realized = new byte[expected.length];
- final int read = IOUtils.readFully(this, realized);
- if (read != expected.length) {
- throw new IOException("failed to read header. Occured at byte: " + getBytesRead());
- }
- for (int i = 0; i < expected.length; i++) {
- if (expected[i] != realized[i]) {
- throw new IOException("invalid header " + ArchiveUtils.toAsciiString(realized));
- }
- }
- }
-
- if (offset % 2 != 0 && read() < 0) {
- // hit eof
- return null;
- }
-
- if (input.available() == 0) {
- return null;
- }
-
- IOUtils.readFully(this, nameBuf);
- IOUtils.readFully(this, lastModifiedBuf);
- IOUtils.readFully(this, idBuf);
- final int userId = asInt(idBuf, true);
- IOUtils.readFully(this, idBuf);
- IOUtils.readFully(this, fileModeBuf);
- IOUtils.readFully(this, lengthBuf);
-
- {
- final byte[] expected = ArchiveUtils.toAsciiBytes(ArArchiveEntry.TRAILER);
- final byte[] realized = new byte[expected.length];
- final int read = IOUtils.readFully(this, realized);
- if (read != expected.length) {
- throw new IOException("failed to read entry trailer. Occured at byte: " + getBytesRead());
- }
- for (int i = 0; i < expected.length; i++) {
- if (expected[i] != realized[i]) {
- throw new IOException("invalid entry trailer. not read the content? Occured at byte: " + getBytesRead());
- }
- }
- }
-
- entryOffset = offset;
-
-// GNU ar uses a '/' to mark the end of the filename; this allows for the use of spaces without the use of an extended filename.
-
- // entry name is stored as ASCII string
- String temp = ArchiveUtils.toAsciiString(nameBuf).trim();
- if (isGNUStringTable(temp)) { // GNU extended filenames entry
- currentEntry = readGNUStringTable(lengthBuf);
- return getNextArEntry();
- }
-
- long len = asLong(lengthBuf);
- if (temp.endsWith("/")) { // GNU terminator
- temp = temp.substring(0, temp.length() - 1);
- } else if (isGNULongName(temp)) {
- final int off = Integer.parseInt(temp.substring(1));// get the offset
- temp = getExtendedName(off); // convert to the long name
- } else if (isBSDLongName(temp)) {
- temp = getBSDLongName(temp);
- // entry length contained the length of the file name in
- // addition to the real length of the entry.
- // assume file name was ASCII, there is no "standard" otherwise
- final int nameLen = temp.length();
- len -= nameLen;
- entryOffset += nameLen;
- }
-
- currentEntry = new ArArchiveEntry(temp, len, userId,
- asInt(idBuf, true),
- asInt(fileModeBuf, 8),
- asLong(lastModifiedBuf));
- return currentEntry;
- }
-
- /**
- * Get an extended name from the GNU extended name buffer.
- *
- * @param offset pointer to entry within the buffer
- * @return the extended file name; without trailing "/" if present.
- * @throws IOException if name not found or buffer not set up
- */
- private String getExtendedName(final int offset) throws IOException {
- if (namebuffer == null) {
- throw new IOException("Cannot process GNU long filename as no // record was found");
- }
- for (int i = offset; i < namebuffer.length; i++) {
- if (namebuffer[i] == '\012' || namebuffer[i] == 0) {
- if (namebuffer[i - 1] == '/') {
- i--; // drop trailing /
- }
- return ArchiveUtils.toAsciiString(namebuffer, offset, i - offset);
- }
- }
- throw new IOException("Failed to read entry: " + offset);
- }
-
- private long asLong(final byte[] byteArray) {
- return Long.parseLong(ArchiveUtils.toAsciiString(byteArray).trim());
- }
-
- private int asInt(final byte[] byteArray) {
- return asInt(byteArray, 10, false);
- }
-
- private int asInt(final byte[] byteArray, final boolean treatBlankAsZero) {
- return asInt(byteArray, 10, treatBlankAsZero);
- }
-
- private int asInt(final byte[] byteArray, final int base) {
- return asInt(byteArray, base, false);
- }
-
- private int asInt(final byte[] byteArray, final int base, final boolean treatBlankAsZero) {
- final String string = ArchiveUtils.toAsciiString(byteArray).trim();
- if (string.length() == 0 && treatBlankAsZero) {
- return 0;
- }
- return Integer.parseInt(string, base);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.commons.compress.archivers.ArchiveInputStream#getNextEntry()
- */
- @Override
- public ArchiveEntry getNextEntry() throws IOException {
- return getNextArEntry();
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.io.InputStream#close()
- */
- @Override
- public void close() throws IOException {
- if (!closed) {
- closed = true;
- input.close();
- }
- currentEntry = null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.io.InputStream#read(byte[], int, int)
- */
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- int toRead = len;
- if (currentEntry != null) {
- final long entryEnd = entryOffset + currentEntry.getLength();
- if (len > 0 && entryEnd > offset) {
- toRead = (int) Math.min(len, entryEnd - offset);
- } else {
- return -1;
- }
- }
- final int ret = this.input.read(b, off, toRead);
- count(ret);
- offset += ret > 0 ? ret : 0;
- return ret;
- }
-
- /**
- * Checks if the signature matches ASCII "!<arch>" followed by a single LF
- * control character
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is an Ar archive stream, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
- // 3c21 7261 6863 0a3e
-
- return length >= 8 && signature[0] == 0x21 &&
- signature[1] == 0x3c && signature[2] == 0x61 &&
- signature[3] == 0x72 && signature[4] == 0x63 &&
- signature[5] == 0x68 && signature[6] == 0x3e &&
- signature[7] == 0x0a;
- }
-
- static final String BSD_LONGNAME_PREFIX = "#1/";
- private static final int BSD_LONGNAME_PREFIX_LEN =
- BSD_LONGNAME_PREFIX.length();
- private static final String BSD_LONGNAME_PATTERN =
- "^" + BSD_LONGNAME_PREFIX + "\\d+";
-
- /**
- * Does the name look like it is a long name (or a name containing
- * spaces) as encoded by BSD ar?
- *
- *
- * <p>From the FreeBSD ar(5) man page:</p>
- *
- * BSD In the BSD variant, names that are shorter than 16
- * characters and without embedded spaces are stored
- * directly in this field. If a name has an embedded
- * space, or if it is longer than 16 characters, then
- * the string "#1/" followed by the decimal represen-
- * tation of the length of the file name is placed in
- * this field. The actual file name is stored immedi-
- * ately after the archive header. The content of the
- * archive member follows the file name. The ar_size
- * field of the header (see below) will then hold the
- * sum of the size of the file name and the size of
- * the member.
- *
- *
- * @since 1.3
- */
- private static boolean isBSDLongName(final String name) {
- return name != null && name.matches(BSD_LONGNAME_PATTERN);
- }
-
- /**
- * Reads the real name from the current stream assuming the very
- * first bytes to be read are the real file name.
- *
- * @see #isBSDLongName
- *
- * @since 1.3
- */
- private String getBSDLongName(final String bsdLongName) throws IOException {
- final int nameLen =
- Integer.parseInt(bsdLongName.substring(BSD_LONGNAME_PREFIX_LEN));
- final byte[] name = new byte[nameLen];
- final int read = IOUtils.readFully(this, name);
- if (read != nameLen) {
- throw new EOFException();
- }
- return ArchiveUtils.toAsciiString(name);
- }
-
- private static final String GNU_STRING_TABLE_NAME = "//";
-
- /**
- * Is this the name of the "Archive String Table" as used by
- * SVR4/GNU to store long file names?
- *
- *
- * <p>GNU ar stores multiple extended filenames in the data section
- * of a file with the name "//"; this record is referred to by
- * future headers.</p>
- *
- *
- * <p>A header references an extended filename by storing a "/"
- * followed by a decimal offset to the start of the filename in
- * the extended filename data section.</p>
- *
- *
The format of the "//" file itself is simply a list of the
- * long filenames, each separated by one or more LF
- * characters. Note that the decimal offsets are number of
- * characters, not line or string number within the "//" file.
- */
- private static boolean isGNUStringTable(final String name) {
- return GNU_STRING_TABLE_NAME.equals(name);
- }
-
- /**
- * Reads the GNU archive String Table.
- *
- * @see #isGNUStringTable
- */
- private ArArchiveEntry readGNUStringTable(final byte[] length) throws IOException {
- final int bufflen = asInt(length); // Assume length will fit in an int
- namebuffer = new byte[bufflen];
- final int read = IOUtils.readFully(this, namebuffer, 0, bufflen);
- if (read != bufflen){
- throw new IOException("Failed to read complete // record: expected="
- + bufflen + " read=" + read);
- }
- return new ArArchiveEntry(GNU_STRING_TABLE_NAME, bufflen);
- }
-
- private static final String GNU_LONGNAME_PATTERN = "^/\\d+";
-
- /**
- * Does the name look like it is a long name (or a name containing
- * spaces) as encoded by SVR4/GNU ar?
- *
- * @see #isGNUStringTable
- */
- private boolean isGNULongName(final String name) {
- return name != null && name.matches(GNU_LONGNAME_PATTERN);
- }
-}
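A minimal read loop over an ar archive using this class, with a placeholder file name; read() is bounded to the current entry, so skipping content between entries is safe:

    import java.io.BufferedInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.commons.compress.archivers.ar.ArArchiveEntry;
    import org.apache.commons.compress.archivers.ar.ArArchiveInputStream;

    public class ArReadSketch {
        public static void main(String[] args) throws IOException {
            try (InputStream in = new BufferedInputStream(
                         Files.newInputStream(Paths.get("example.a")));
                 ArArchiveInputStream ar = new ArArchiveInputStream(in)) {
                ArArchiveEntry entry;
                while ((entry = ar.getNextArEntry()) != null) {
                    // getNextArEntry() skips any unread remainder of the previous entry
                    System.out.println(entry.getName() + " (" + entry.getLength() + " bytes)");
                }
            }
        }
    }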
diff --git a/src/org/apache/commons/compress/archivers/ar/ArArchiveOutputStream.java b/src/org/apache/commons/compress/archivers/ar/ArArchiveOutputStream.java
deleted file mode 100644
index ffca90b383a..00000000000
--- a/src/org/apache/commons/compress/archivers/ar/ArArchiveOutputStream.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.ar;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.utils.ArchiveUtils;
-
-/**
- * Implements the "ar" archive format as an output stream.
- *
- * @NotThreadSafe
- */
-public class ArArchiveOutputStream extends ArchiveOutputStream {
- /** Fail if a long file name is required in the archive. */
- public static final int LONGFILE_ERROR = 0;
-
- /** BSD ar extensions are used to store long file names in the archive. */
- public static final int LONGFILE_BSD = 1;
-
- private final OutputStream out;
- private long entryOffset = 0;
- private ArArchiveEntry prevEntry;
- private boolean haveUnclosedEntry = false;
- private int longFileMode = LONGFILE_ERROR;
-
- /** indicates if this archive is finished */
- private boolean finished = false;
-
- public ArArchiveOutputStream( final OutputStream pOut ) {
- this.out = pOut;
- }
-
- /**
- * Set the long file mode.
- * This can be LONGFILE_ERROR(0) or LONGFILE_BSD(1).
- * This specifies the treatment of long file names (names longer than 16 characters).
- * Default is LONGFILE_ERROR.
- * @param longFileMode the mode to use
- * @since 1.3
- */
- public void setLongFileMode(final int longFileMode) {
- this.longFileMode = longFileMode;
- }
-
- private long writeArchiveHeader() throws IOException {
- final byte [] header = ArchiveUtils.toAsciiBytes(ArArchiveEntry.HEADER);
- out.write(header);
- return header.length;
- }
-
- @Override
- public void closeArchiveEntry() throws IOException {
- if(finished) {
- throw new IOException("Stream has already been finished");
- }
- if (prevEntry == null || !haveUnclosedEntry){
- throw new IOException("No current entry to close");
- }
- if (entryOffset % 2 != 0) {
- out.write('\n'); // Pad byte
- }
- haveUnclosedEntry = false;
- }
-
- @Override
- public void putArchiveEntry( final ArchiveEntry pEntry ) throws IOException {
- if(finished) {
- throw new IOException("Stream has already been finished");
- }
-
- final ArArchiveEntry pArEntry = (ArArchiveEntry)pEntry;
- if (prevEntry == null) {
- writeArchiveHeader();
- } else {
- if (prevEntry.getLength() != entryOffset) {
- throw new IOException("length does not match entry (" + prevEntry.getLength() + " != " + entryOffset);
- }
-
- if (haveUnclosedEntry) {
- closeArchiveEntry();
- }
- }
-
- prevEntry = pArEntry;
-
- writeEntryHeader(pArEntry);
-
- entryOffset = 0;
- haveUnclosedEntry = true;
- }
-
- private long fill( final long pOffset, final long pNewOffset, final char pFill ) throws IOException {
- final long diff = pNewOffset - pOffset;
-
- if (diff > 0) {
- for (int i = 0; i < diff; i++) {
- write(pFill);
- }
- }
-
- return pNewOffset;
- }
-
- private long write( final String data ) throws IOException {
- final byte[] bytes = data.getBytes("ascii");
- write(bytes);
- return bytes.length;
- }
-
- private long writeEntryHeader( final ArArchiveEntry pEntry ) throws IOException {
-
- long offset = 0;
- boolean mustAppendName = false;
-
- final String n = pEntry.getName();
- if (LONGFILE_ERROR == longFileMode && n.length() > 16) {
- throw new IOException("filename too long, > 16 chars: "+n);
- }
- if (LONGFILE_BSD == longFileMode &&
- (n.length() > 16 || n.contains(" "))) {
- mustAppendName = true;
- offset += write(ArArchiveInputStream.BSD_LONGNAME_PREFIX
- + String.valueOf(n.length()));
- } else {
- offset += write(n);
- }
-
- offset = fill(offset, 16, ' ');
- final String m = "" + pEntry.getLastModified();
- if (m.length() > 12) {
- throw new IOException("modified too long");
- }
- offset += write(m);
-
- offset = fill(offset, 28, ' ');
- final String u = "" + pEntry.getUserId();
- if (u.length() > 6) {
- throw new IOException("userid too long");
- }
- offset += write(u);
-
- offset = fill(offset, 34, ' ');
- final String g = "" + pEntry.getGroupId();
- if (g.length() > 6) {
- throw new IOException("groupid too long");
- }
- offset += write(g);
-
- offset = fill(offset, 40, ' ');
- final String fm = "" + Integer.toString(pEntry.getMode(), 8);
- if (fm.length() > 8) {
- throw new IOException("filemode too long");
- }
- offset += write(fm);
-
- offset = fill(offset, 48, ' ');
- final String s =
- String.valueOf(pEntry.getLength()
- + (mustAppendName ? n.length() : 0));
- if (s.length() > 10) {
- throw new IOException("size too long");
- }
- offset += write(s);
-
- offset = fill(offset, 58, ' ');
-
- offset += write(ArArchiveEntry.TRAILER);
-
- if (mustAppendName) {
- offset += write(n);
- }
-
- return offset;
- }
-
- @Override
- public void write(final byte[] b, final int off, final int len) throws IOException {
- out.write(b, off, len);
- count(len);
- entryOffset += len;
- }
-
- /**
- * Calls finish if necessary, and then closes the OutputStream
- */
- @Override
- public void close() throws IOException {
- if(!finished) {
- finish();
- }
- out.close();
- prevEntry = null;
- }
-
- @Override
- public ArchiveEntry createArchiveEntry(final File inputFile, final String entryName)
- throws IOException {
- if(finished) {
- throw new IOException("Stream has already been finished");
- }
- return new ArArchiveEntry(inputFile, entryName);
- }
-
- @Override
- public void finish() throws IOException {
- if(haveUnclosedEntry) {
- throw new IOException("This archive contains unclosed entries.");
- } else if(finished) {
- throw new IOException("This archive has already been finished");
- }
- finished = true;
- }
-}
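And the matching write side: a sketch that stores one entry, opting into the BSD extension so names longer than 16 characters don't trip LONGFILE_ERROR (file name and content are placeholders):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.commons.compress.archivers.ar.ArArchiveEntry;
    import org.apache.commons.compress.archivers.ar.ArArchiveOutputStream;

    public class ArWriteSketch {
        public static void main(String[] args) throws IOException {
            byte[] data = "hello ar\n".getBytes(StandardCharsets.US_ASCII);
            try (OutputStream out = Files.newOutputStream(Paths.get("example.a"));
                 ArArchiveOutputStream ar = new ArArchiveOutputStream(out)) {
                ar.setLongFileMode(ArArchiveOutputStream.LONGFILE_BSD);
                // the declared length must equal the bytes actually written
                ar.putArchiveEntry(new ArArchiveEntry("a-name-longer-than-16-chars.txt", data.length));
                ar.write(data);
                ar.closeArchiveEntry();
                ar.finish();
            }
        }
    }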
diff --git a/src/org/apache/commons/compress/archivers/ar/package.html b/src/org/apache/commons/compress/archivers/ar/package.html
deleted file mode 100644
index 9c80f96e144..00000000000
--- a/src/org/apache/commons/compress/archivers/ar/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
- <p>Provides stream classes for reading and writing archives using
- the AR format.</p>
-
-
diff --git a/src/org/apache/commons/compress/archivers/arj/ArjArchiveEntry.java b/src/org/apache/commons/compress/archivers/arj/ArjArchiveEntry.java
deleted file mode 100644
index ab847db856c..00000000000
--- a/src/org/apache/commons/compress/archivers/arj/ArjArchiveEntry.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.arj;
-
-import java.io.File;
-import java.util.Date;
-import java.util.regex.Matcher;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipUtil;
-
-/**
- * An entry in an ARJ archive.
- *
- * @NotThreadSafe
- * @since 1.6
- */
-public class ArjArchiveEntry implements ArchiveEntry {
- private final LocalFileHeader localFileHeader;
-
- public ArjArchiveEntry() {
- localFileHeader = new LocalFileHeader();
- }
-
- ArjArchiveEntry(final LocalFileHeader localFileHeader) {
- this.localFileHeader = localFileHeader;
- }
-
- /**
- * Get this entry's name.
- *
- *
- * <p>This method returns the raw name as it is stored inside the archive.</p>
- *
- * @return This entry's name.
- */
- @Override
- public String getName() {
- if ((localFileHeader.arjFlags & LocalFileHeader.Flags.PATHSYM) != 0) {
- return localFileHeader.name.replaceAll("/",
- Matcher.quoteReplacement(File.separator));
- }
- return localFileHeader.name;
- }
-
- /**
- * Get this entry's file size.
- *
- * @return This entry's file size.
- */
- @Override
- public long getSize() {
- return localFileHeader.originalSize;
- }
-
- /** True if the entry refers to a directory.
- *
- * @return True if the entry refers to a directory
- */
- @Override
- public boolean isDirectory() {
- return localFileHeader.fileType == LocalFileHeader.FileTypes.DIRECTORY;
- }
-
- /**
- * The last modified date of the entry.
- *
- *
- * <p>Note the interpretation of time is different depending on
- * the host OS that has created the archive. While an OS that is
- * {@link #isHostOsUnix considered to be Unix} stores time in a
- * timezone independent manner, other platforms only use the local
- * time. I.e. if an archive has been created at midnight UTC on a
- * machine in timezone UTC this method will return midnight
- * regardless of timezone if the archive has been created on a
- * non-Unix system and a time taking the current timezone into
- * account if the archive has been created on Unix.</p>
- *
- * @return the last modified date
- */
- @Override
- public Date getLastModifiedDate() {
- final long ts = isHostOsUnix() ? localFileHeader.dateTimeModified * 1000L
- : ZipUtil.dosToJavaTime(0xFFFFFFFFL & localFileHeader.dateTimeModified);
- return new Date(ts);
- }
-
- /**
- * File mode of this entry.
- *
- *
- * <p>The format depends on the host OS that created the entry.</p>
- *
- * @return the file mode
- */
- public int getMode() {
- return localFileHeader.fileAccessMode;
- }
-
- /**
- * File mode of this entry as Unix stat value.
- *
- *
- * <p>Will only be non-zero if the host OS was Unix.</p>
- *
- * @return the Unix mode
- */
- public int getUnixMode() {
- return isHostOsUnix() ? getMode() : 0;
- }
-
- /**
- * The operating system the archive has been created on.
- * @see HostOs
- * @return the host OS code
- */
- public int getHostOs() {
- return localFileHeader.hostOS;
- }
-
- /**
- * Is the operating system the archive has been created on one
- * that is considered a UNIX OS by arj?
- * @return whether the operating system the archive has been
- * created on is considered a UNIX OS by arj
- */
- public boolean isHostOsUnix() {
- return getHostOs() == HostOs.UNIX || getHostOs() == HostOs.NEXT;
- }
-
- int getMethod() {
- return localFileHeader.method;
- }
-
- /**
- * The known values for HostOs.
- */
- public static class HostOs {
- public static final int DOS = 0;
- public static final int PRIMOS = 1;
- public static final int UNIX = 2;
- public static final int AMIGA = 3;
- public static final int MAC_OS = 4;
- public static final int OS_2 = 5;
- public static final int APPLE_GS = 6;
- public static final int ATARI_ST = 7;
- public static final int NEXT = 8;
- public static final int VAX_VMS = 9;
- public static final int WIN95 = 10;
- public static final int WIN32 = 11;
- }
-
-}
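A listing sketch that exercises the host-OS-dependent pieces of this entry class; it assumes, as in upstream commons-compress, that ArjArchiveInputStream.getNextEntry() returns ArjArchiveEntry (the file name is a placeholder):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.commons.compress.archivers.arj.ArjArchiveEntry;
    import org.apache.commons.compress.archivers.arj.ArjArchiveInputStream;

    public class ArjListSketch {
        public static void main(String[] args) throws Exception {
            try (InputStream in = Files.newInputStream(Paths.get("example.arj"));
                 ArjArchiveInputStream arj = new ArjArchiveInputStream(in)) {
                ArjArchiveEntry entry;
                while ((entry = arj.getNextEntry()) != null) {
                    // timestamps are timezone-independent only for Unix-created archives
                    System.out.println(entry.getName() + " host-OS-unix=" + entry.isHostOsUnix()
                            + " modified=" + entry.getLastModifiedDate());
                }
            }
        }
    }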
diff --git a/src/org/apache/commons/compress/archivers/arj/ArjArchiveInputStream.java b/src/org/apache/commons/compress/archivers/arj/ArjArchiveInputStream.java
deleted file mode 100644
index c22d4c04785..00000000000
--- a/src/org/apache/commons/compress/archivers/arj/ArjArchiveInputStream.java
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.arj;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.zip.CRC32;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.utils.BoundedInputStream;
-import org.apache.commons.compress.utils.CRC32VerifyingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Implements the "arj" archive format as an InputStream.
- *
- * Reference
- * @NotThreadSafe
- * @since 1.6
- */
-public class ArjArchiveInputStream extends ArchiveInputStream {
- private static final int ARJ_MAGIC_1 = 0x60;
- private static final int ARJ_MAGIC_2 = 0xEA;
- private final DataInputStream in;
- private final String charsetName;
- private final MainHeader mainHeader;
- private LocalFileHeader currentLocalFileHeader = null;
- private InputStream currentInputStream = null;
-
- /**
- * Constructs the ArjArchiveInputStream, taking ownership of the inputStream that is passed in.
- * @param inputStream the underlying stream, whose ownership is taken
- * @param charsetName the charset used for file names and comments
- * in the archive. May be {@code null} to use the platform default.
- * @throws ArchiveException if an exception occurs while reading
- */
- public ArjArchiveInputStream(final InputStream inputStream,
- final String charsetName) throws ArchiveException {
- in = new DataInputStream(inputStream);
- this.charsetName = charsetName;
- try {
- mainHeader = readMainHeader();
- if ((mainHeader.arjFlags & MainHeader.Flags.GARBLED) != 0) {
- throw new ArchiveException("Encrypted ARJ files are unsupported");
- }
- if ((mainHeader.arjFlags & MainHeader.Flags.VOLUME) != 0) {
- throw new ArchiveException("Multi-volume ARJ files are unsupported");
- }
- } catch (final IOException ioException) {
- throw new ArchiveException(ioException.getMessage(), ioException);
- }
- }
-
- /**
- * Constructs the ArjArchiveInputStream, taking ownership of the inputStream that is passed in,
- * and using the CP437 character encoding.
- * @param inputStream the underlying stream, whose ownership is taken
- * @throws ArchiveException if an exception occurs while reading
- */
- public ArjArchiveInputStream(final InputStream inputStream)
- throws ArchiveException {
- this(inputStream, "CP437");
- }
-
- @Override
- public void close() throws IOException {
- in.close();
- }
-
- private int read8(final DataInputStream dataIn) throws IOException {
- final int value = dataIn.readUnsignedByte();
- count(1);
- return value;
- }
-
- private int read16(final DataInputStream dataIn) throws IOException {
- final int value = dataIn.readUnsignedShort();
- count(2);
- return Integer.reverseBytes(value) >>> 16;
- }
-
- private int read32(final DataInputStream dataIn) throws IOException {
- final int value = dataIn.readInt();
- count(4);
- return Integer.reverseBytes(value);
- }
-
- private String readString(final DataInputStream dataIn) throws IOException {
- final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
- int nextByte;
- while ((nextByte = dataIn.readUnsignedByte()) != 0) {
- buffer.write(nextByte);
- }
- if (charsetName != null) {
- return new String(buffer.toByteArray(), charsetName);
- }
- // intentionally using the default encoding as that's the contract for a null charsetName
- return new String(buffer.toByteArray());
- }
-
- private void readFully(final DataInputStream dataIn, final byte[] b)
- throws IOException {
- dataIn.readFully(b);
- count(b.length);
- }
-
- private byte[] readHeader() throws IOException {
- boolean found = false;
- byte[] basicHeaderBytes = null;
- do {
- int first = 0;
- int second = read8(in);
- do {
- first = second;
- second = read8(in);
- } while (first != ARJ_MAGIC_1 && second != ARJ_MAGIC_2);
- final int basicHeaderSize = read16(in);
- if (basicHeaderSize == 0) {
- // end of archive
- return null;
- }
- if (basicHeaderSize <= 2600) {
- basicHeaderBytes = new byte[basicHeaderSize];
- readFully(in, basicHeaderBytes);
- final long basicHeaderCrc32 = read32(in) & 0xFFFFFFFFL;
- final CRC32 crc32 = new CRC32();
- crc32.update(basicHeaderBytes);
- if (basicHeaderCrc32 == crc32.getValue()) {
- found = true;
- }
- }
- } while (!found);
- return basicHeaderBytes;
- }
-
- private MainHeader readMainHeader() throws IOException {
- final byte[] basicHeaderBytes = readHeader();
- if (basicHeaderBytes == null) {
- throw new IOException("Archive ends without any headers");
- }
- final DataInputStream basicHeader = new DataInputStream(
- new ByteArrayInputStream(basicHeaderBytes));
-
- final int firstHeaderSize = basicHeader.readUnsignedByte();
- final byte[] firstHeaderBytes = new byte[firstHeaderSize - 1];
- basicHeader.readFully(firstHeaderBytes);
- final DataInputStream firstHeader = new DataInputStream(
- new ByteArrayInputStream(firstHeaderBytes));
-
- final MainHeader hdr = new MainHeader();
- hdr.archiverVersionNumber = firstHeader.readUnsignedByte();
- hdr.minVersionToExtract = firstHeader.readUnsignedByte();
- hdr.hostOS = firstHeader.readUnsignedByte();
- hdr.arjFlags = firstHeader.readUnsignedByte();
- hdr.securityVersion = firstHeader.readUnsignedByte();
- hdr.fileType = firstHeader.readUnsignedByte();
- hdr.reserved = firstHeader.readUnsignedByte();
- hdr.dateTimeCreated = read32(firstHeader);
- hdr.dateTimeModified = read32(firstHeader);
- hdr.archiveSize = 0xffffFFFFL & read32(firstHeader);
- hdr.securityEnvelopeFilePosition = read32(firstHeader);
- hdr.fileSpecPosition = read16(firstHeader);
- hdr.securityEnvelopeLength = read16(firstHeader);
- pushedBackBytes(20); // count has already counted them via readFully
- hdr.encryptionVersion = firstHeader.readUnsignedByte();
- hdr.lastChapter = firstHeader.readUnsignedByte();
-
- if (firstHeaderSize >= 33) {
- hdr.arjProtectionFactor = firstHeader.readUnsignedByte();
- hdr.arjFlags2 = firstHeader.readUnsignedByte();
- firstHeader.readUnsignedByte();
- firstHeader.readUnsignedByte();
- }
-
- hdr.name = readString(basicHeader);
- hdr.comment = readString(basicHeader);
-
- final int extendedHeaderSize = read16(in);
- if (extendedHeaderSize > 0) {
- hdr.extendedHeaderBytes = new byte[extendedHeaderSize];
- readFully(in, hdr.extendedHeaderBytes);
- final long extendedHeaderCrc32 = 0xffffFFFFL & read32(in);
- final CRC32 crc32 = new CRC32();
- crc32.update(hdr.extendedHeaderBytes);
- if (extendedHeaderCrc32 != crc32.getValue()) {
- throw new IOException("Extended header CRC32 verification failure");
- }
- }
-
- return hdr;
- }
-
- private LocalFileHeader readLocalFileHeader() throws IOException {
- final byte[] basicHeaderBytes = readHeader();
- if (basicHeaderBytes == null) {
- return null;
- }
- try (final DataInputStream basicHeader = new DataInputStream(new ByteArrayInputStream(basicHeaderBytes))) {
-
- final int firstHeaderSize = basicHeader.readUnsignedByte();
- final byte[] firstHeaderBytes = new byte[firstHeaderSize - 1];
- basicHeader.readFully(firstHeaderBytes);
- try (final DataInputStream firstHeader = new DataInputStream(new ByteArrayInputStream(firstHeaderBytes))) {
-
- final LocalFileHeader localFileHeader = new LocalFileHeader();
- localFileHeader.archiverVersionNumber = firstHeader.readUnsignedByte();
- localFileHeader.minVersionToExtract = firstHeader.readUnsignedByte();
- localFileHeader.hostOS = firstHeader.readUnsignedByte();
- localFileHeader.arjFlags = firstHeader.readUnsignedByte();
- localFileHeader.method = firstHeader.readUnsignedByte();
- localFileHeader.fileType = firstHeader.readUnsignedByte();
- localFileHeader.reserved = firstHeader.readUnsignedByte();
- localFileHeader.dateTimeModified = read32(firstHeader);
- localFileHeader.compressedSize = 0xffffFFFFL & read32(firstHeader);
- localFileHeader.originalSize = 0xffffFFFFL & read32(firstHeader);
- localFileHeader.originalCrc32 = 0xffffFFFFL & read32(firstHeader);
- localFileHeader.fileSpecPosition = read16(firstHeader);
- localFileHeader.fileAccessMode = read16(firstHeader);
- pushedBackBytes(20);
- localFileHeader.firstChapter = firstHeader.readUnsignedByte();
- localFileHeader.lastChapter = firstHeader.readUnsignedByte();
-
- readExtraData(firstHeaderSize, firstHeader, localFileHeader);
-
- localFileHeader.name = readString(basicHeader);
- localFileHeader.comment = readString(basicHeader);
-
- final ArrayList<byte[]> extendedHeaders = new ArrayList<>();
- int extendedHeaderSize;
- while ((extendedHeaderSize = read16(in)) > 0) {
- final byte[] extendedHeaderBytes = new byte[extendedHeaderSize];
- readFully(in, extendedHeaderBytes);
- final long extendedHeaderCrc32 = 0xffffFFFFL & read32(in);
- final CRC32 crc32 = new CRC32();
- crc32.update(extendedHeaderBytes);
- if (extendedHeaderCrc32 != crc32.getValue()) {
- throw new IOException("Extended header CRC32 verification failure");
- }
- extendedHeaders.add(extendedHeaderBytes);
- }
- localFileHeader.extendedHeaders = extendedHeaders.toArray(new byte[extendedHeaders.size()][]);
-
- return localFileHeader;
- }
- }
- }
-
- private void readExtraData(final int firstHeaderSize, final DataInputStream firstHeader,
- final LocalFileHeader localFileHeader) throws IOException {
- if (firstHeaderSize >= 33) {
- localFileHeader.extendedFilePosition = read32(firstHeader);
- if (firstHeaderSize >= 45) {
- localFileHeader.dateTimeAccessed = read32(firstHeader);
- localFileHeader.dateTimeCreated = read32(firstHeader);
- localFileHeader.originalSizeEvenForVolumes = read32(firstHeader);
- pushedBackBytes(12);
- }
- pushedBackBytes(4);
- }
- }
-
- /**
- * Checks if the signature matches what is expected for an arj file.
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is an arj archive stream, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
- return length >= 2 &&
- (0xff & signature[0]) == ARJ_MAGIC_1 &&
- (0xff & signature[1]) == ARJ_MAGIC_2;
- }
-
- /**
- * Gets the archive's recorded name.
- * @return the archive's name
- */
- public String getArchiveName() {
- return mainHeader.name;
- }
-
- /**
- * Gets the archive's comment.
- * @return the archive's comment
- */
- public String getArchiveComment() {
- return mainHeader.comment;
- }
-
- @Override
- public ArjArchiveEntry getNextEntry() throws IOException {
- if (currentInputStream != null) {
- // return value ignored as IOUtils.skip ensures the stream is drained completely
- IOUtils.skip(currentInputStream, Long.MAX_VALUE);
- currentInputStream.close();
- currentLocalFileHeader = null;
- currentInputStream = null;
- }
-
- currentLocalFileHeader = readLocalFileHeader();
- if (currentLocalFileHeader != null) {
- currentInputStream = new BoundedInputStream(in, currentLocalFileHeader.compressedSize);
- if (currentLocalFileHeader.method == LocalFileHeader.Methods.STORED) {
- currentInputStream = new CRC32VerifyingInputStream(currentInputStream,
- currentLocalFileHeader.originalSize, currentLocalFileHeader.originalCrc32);
- }
- return new ArjArchiveEntry(currentLocalFileHeader);
- }
- currentInputStream = null;
- return null;
- }
-
- @Override
- public boolean canReadEntryData(final ArchiveEntry ae) {
- return ae instanceof ArjArchiveEntry
- && ((ArjArchiveEntry) ae).getMethod() == LocalFileHeader.Methods.STORED;
- }
-
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- if (currentLocalFileHeader == null) {
- throw new IllegalStateException("No current arj entry");
- }
- if (currentLocalFileHeader.method != LocalFileHeader.Methods.STORED) {
- throw new IOException("Unsupported compression method " + currentLocalFileHeader.method);
- }
- return currentInputStream.read(b, off, len);
- }
-}
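
Aside: the class deleted above now comes unchanged from the Ivy-resolved commons-compress artifact, so callers keep the same API. A minimal usage sketch (the archive path is a placeholder; as canReadEntryData shows, only STORED entries can be read back):

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.arj.ArjArchiveEntry;
import org.apache.commons.compress.archivers.arj.ArjArchiveInputStream;

public class ArjListing {
    public static void main(final String[] args) throws IOException, ArchiveException {
        // "test.arj" is a placeholder path for this sketch.
        try (InputStream fileIn = new BufferedInputStream(new FileInputStream("test.arj"));
             ArjArchiveInputStream arj = new ArjArchiveInputStream(fileIn)) {
            System.out.println("archive name: " + arj.getArchiveName());
            ArjArchiveEntry entry;
            while ((entry = arj.getNextEntry()) != null) {
                // Entry data is only readable when stored uncompressed (method == STORED).
                System.out.println(entry.getName() + " (" + entry.getSize() + " bytes)");
            }
        }
    }
}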
diff --git a/src/org/apache/commons/compress/archivers/arj/LocalFileHeader.java b/src/org/apache/commons/compress/archivers/arj/LocalFileHeader.java
deleted file mode 100644
index 6ecb297becd..00000000000
--- a/src/org/apache/commons/compress/archivers/arj/LocalFileHeader.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.arj;
-
-import java.util.Arrays;
-
-class LocalFileHeader {
- int archiverVersionNumber;
- int minVersionToExtract;
- int hostOS;
- int arjFlags;
- int method;
- int fileType;
- int reserved;
- int dateTimeModified;
- long compressedSize;
- long originalSize;
- long originalCrc32;
- int fileSpecPosition;
- int fileAccessMode;
- int firstChapter;
- int lastChapter;
-
- int extendedFilePosition;
- int dateTimeAccessed;
- int dateTimeCreated;
- int originalSizeEvenForVolumes;
-
- String name;
- String comment;
-
- byte[][] extendedHeaders = null;
-
- static class Flags {
- static final int GARBLED = 0x01;
- static final int VOLUME = 0x04;
- static final int EXTFILE = 0x08;
- static final int PATHSYM = 0x10;
- static final int BACKUP = 0x20;
- }
-
- static class FileTypes {
- static final int BINARY = 0;
- static final int SEVEN_BIT_TEXT = 1;
- static final int DIRECTORY = 3;
- static final int VOLUME_LABEL = 4;
- static final int CHAPTER_LABEL = 5;
- }
-
- static class Methods {
- static final int STORED = 0;
- static final int COMPRESSED_MOST = 1;
- static final int COMPRESSED_FASTEST = 4;
- static final int NO_DATA_NO_CRC = 8;
- static final int NO_DATA = 9;
- }
-
- @Override
- public String toString() {
- final StringBuilder builder = new StringBuilder();
- builder.append("LocalFileHeader [archiverVersionNumber=");
- builder.append(archiverVersionNumber);
- builder.append(", minVersionToExtract=");
- builder.append(minVersionToExtract);
- builder.append(", hostOS=");
- builder.append(hostOS);
- builder.append(", arjFlags=");
- builder.append(arjFlags);
- builder.append(", method=");
- builder.append(method);
- builder.append(", fileType=");
- builder.append(fileType);
- builder.append(", reserved=");
- builder.append(reserved);
- builder.append(", dateTimeModified=");
- builder.append(dateTimeModified);
- builder.append(", compressedSize=");
- builder.append(compressedSize);
- builder.append(", originalSize=");
- builder.append(originalSize);
- builder.append(", originalCrc32=");
- builder.append(originalCrc32);
- builder.append(", fileSpecPosition=");
- builder.append(fileSpecPosition);
- builder.append(", fileAccessMode=");
- builder.append(fileAccessMode);
- builder.append(", firstChapter=");
- builder.append(firstChapter);
- builder.append(", lastChapter=");
- builder.append(lastChapter);
- builder.append(", extendedFilePosition=");
- builder.append(extendedFilePosition);
- builder.append(", dateTimeAccessed=");
- builder.append(dateTimeAccessed);
- builder.append(", dateTimeCreated=");
- builder.append(dateTimeCreated);
- builder.append(", originalSizeEvenForVolumes=");
- builder.append(originalSizeEvenForVolumes);
- builder.append(", name=");
- builder.append(name);
- builder.append(", comment=");
- builder.append(comment);
- builder.append(", extendedHeaders=");
- builder.append(Arrays.toString(extendedHeaders));
- builder.append("]");
- return builder.toString();
- }
-}
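
Aside: the Methods codes above decide whether ArjArchiveInputStream can hand back entry data at all. A hedged helper sketch (not part of the library) for labelling the method byte in diagnostics:

// Hypothetical diagnostic helper, not part of commons-compress: labels the
// ARJ method byte using the LocalFileHeader.Methods values listed above.
static String describeMethod(final int method) {
    switch (method) {
        case 0:  return "stored (extractable)";      // Methods.STORED
        case 1:  return "compressed most";           // Methods.COMPRESSED_MOST
        case 4:  return "compressed fastest";        // Methods.COMPRESSED_FASTEST
        case 8:  return "no data, no CRC";           // Methods.NO_DATA_NO_CRC
        case 9:  return "no data";                   // Methods.NO_DATA
        default: return "unknown method " + method;  // 2 and 3 are the intermediate levels
    }
}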
diff --git a/src/org/apache/commons/compress/archivers/arj/MainHeader.java b/src/org/apache/commons/compress/archivers/arj/MainHeader.java
deleted file mode 100644
index 7a9f212a28b..00000000000
--- a/src/org/apache/commons/compress/archivers/arj/MainHeader.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.arj;
-
-import java.util.Arrays;
-
-class MainHeader {
- int archiverVersionNumber;
- int minVersionToExtract;
- int hostOS;
- int arjFlags;
- int securityVersion;
- int fileType;
- int reserved;
- int dateTimeCreated;
- int dateTimeModified;
- long archiveSize;
- int securityEnvelopeFilePosition;
- int fileSpecPosition;
- int securityEnvelopeLength;
- int encryptionVersion;
- int lastChapter;
- int arjProtectionFactor;
- int arjFlags2;
- String name;
- String comment;
- byte[] extendedHeaderBytes = null;
-
- static class Flags {
- static final int GARBLED = 0x01;
- static final int OLD_SECURED_NEW_ANSI_PAGE = 0x02;
- static final int VOLUME = 0x04;
- static final int ARJPROT = 0x08;
- static final int PATHSYM = 0x10;
- static final int BACKUP = 0x20;
- static final int SECURED = 0x40;
- static final int ALTNAME = 0x80;
- }
-
-
- @Override
- public String toString() {
- final StringBuilder builder = new StringBuilder();
- builder.append("MainHeader [archiverVersionNumber=");
- builder.append(archiverVersionNumber);
- builder.append(", minVersionToExtract=");
- builder.append(minVersionToExtract);
- builder.append(", hostOS=");
- builder.append(hostOS);
- builder.append(", arjFlags=");
- builder.append(arjFlags);
- builder.append(", securityVersion=");
- builder.append(securityVersion);
- builder.append(", fileType=");
- builder.append(fileType);
- builder.append(", reserved=");
- builder.append(reserved);
- builder.append(", dateTimeCreated=");
- builder.append(dateTimeCreated);
- builder.append(", dateTimeModified=");
- builder.append(dateTimeModified);
- builder.append(", archiveSize=");
- builder.append(archiveSize);
- builder.append(", securityEnvelopeFilePosition=");
- builder.append(securityEnvelopeFilePosition);
- builder.append(", fileSpecPosition=");
- builder.append(fileSpecPosition);
- builder.append(", securityEnvelopeLength=");
- builder.append(securityEnvelopeLength);
- builder.append(", encryptionVersion=");
- builder.append(encryptionVersion);
- builder.append(", lastChapter=");
- builder.append(lastChapter);
- builder.append(", arjProtectionFactor=");
- builder.append(arjProtectionFactor);
- builder.append(", arjFlags2=");
- builder.append(arjFlags2);
- builder.append(", name=");
- builder.append(name);
- builder.append(", comment=");
- builder.append(comment);
- builder.append(", extendedHeaderBytes=");
- builder.append(Arrays.toString(extendedHeaderBytes));
- builder.append("]");
- return builder.toString();
- }
-}
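
Aside: ArjArchiveInputStream's constructor rejects archives using two of the flag bits above. The same bit tests in isolation, as a sketch:

// Sketch of the two flag tests ArjArchiveInputStream's constructor performs
// on MainHeader.arjFlags; the masks mirror MainHeader.Flags above.
static boolean isSupportedArchive(final int arjFlags) {
    final int garbled = 0x01; // Flags.GARBLED: encrypted archive
    final int volume = 0x04;  // Flags.VOLUME: part of a multi-volume set
    return (arjFlags & (garbled | volume)) == 0;
}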
diff --git a/src/org/apache/commons/compress/archivers/arj/package.html b/src/org/apache/commons/compress/archivers/arj/package.html
deleted file mode 100644
index de18f61d884..00000000000
--- a/src/org/apache/commons/compress/archivers/arj/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
- Provides stream classes for reading archives using
- the ARJ format.
-
-
diff --git a/src/org/apache/commons/compress/archivers/cpio/CpioArchiveEntry.java b/src/org/apache/commons/compress/archivers/cpio/CpioArchiveEntry.java
deleted file mode 100644
index 28e58238cc4..00000000000
--- a/src/org/apache/commons/compress/archivers/cpio/CpioArchiveEntry.java
+++ /dev/null
@@ -1,896 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.cpio;
-
-import java.io.File;
-import java.util.Date;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * A cpio archive consists of a sequence of files. There are several types of
- * headers defined in two categories of new and old format. The headers are
- * recognized by magic numbers:
- *
- *
- * <ul>
- * <li>"070701" ASCII for new portable format</li>
- * <li>"070702" ASCII for new portable format with CRC</li>
- * <li>"070707" ASCII for old ascii (also known as Portable ASCII, odc or old
- * character format)</li>
- * <li>070707 binary for old binary</li>
- * </ul>
- *
- * <p>The old binary format is limited to 16 bits for user id, group
- * id, device, and inode numbers. It is limited to 4 gigabyte file
- * sizes.
- *
- * The old ASCII format is limited to 18 bits for the user id, group
- * id, device, and inode numbers. It is limited to 8 gigabyte file
- * sizes.
- *
- * The new ASCII format is limited to 4 gigabyte file sizes.
- *
- * CPIO 2.5 also knows about tar, but it is not recognized here.
- *
- *
- * <p><b>OLD FORMAT</b></p>
- *
- * <p>Each file has a 76 (ascii) / 26 (binary) byte header, a variable
- * length, NUL terminated filename, and variable length file data. A
- * header for a filename "TRAILER!!!" indicates the end of the
- * archive.
- *
- * <p>All the fields in the header are ISO 646 (approximately ASCII)
- * strings of octal numbers, left padded, not NUL terminated.
- *
- * <pre>
- * FIELDNAME NOTES
- * c_magic The integer value octal 070707. This value can be used to deter-
- * mine whether this archive is written with little-endian or big-
- * endian integers.
- * c_dev Device that contains a directory entry for this file
- * c_ino I-node number that identifies the input file to the file system
- * c_mode The mode specifies both the regular permissions and the file type.
- * c_uid Numeric User ID of the owner of the input file
- * c_gid Numeric Group ID of the owner of the input file
- * c_nlink Number of links that are connected to the input file
- * c_rdev For block special and character special entries, this field
- * contains the associated device number. For all other entry types,
- * it should be set to zero by writers and ignored by readers.
- * c_mtime[2] Modification time of the file, indicated as the number of seconds
- * since the start of the epoch, 00:00:00 UTC January 1, 1970. The
- * four-byte integer is stored with the most-significant 16 bits
- * first followed by the least-significant 16 bits. Each of the two
- * 16 bit values are stored in machine-native byte order.
- * c_namesize Length of the path name, including the terminating null byte
- * c_filesize[2] Length of the file in bytes. This is the length of the data
- * section that follows the header structure. Must be 0 for
- * FIFOs and directories
- *
- * All fields are unsigned short fields with 16-bit integer values
- * apart from c_mtime and c_filesize which are 32-bit integer values
- * </pre>
- *
- * <p>If necessary, the filename and file data are padded with a NUL byte to an even length
- *
- * <p>Special files, directories, and the trailer are recorded with
- * the h_filesize field equal to 0.
- *
- * <p>In the ASCII version of this format, the 16-bit entries are represented as 6-byte octal numbers,
- * and the 32-bit entries are represented as 11-byte octal numbers. No padding is added.
- *
- * <p><b>NEW FORMAT</b></p>
- *
- * <p>Each file has a 110 byte header, a variable length, NUL
- * terminated filename, and variable length file data. A header for a
- * filename "TRAILER!!!" indicates the end of the archive. All the
- * fields in the header are ISO 646 (approximately ASCII) strings of
- * hexadecimal numbers, left padded, not NUL terminated.
- *
- * <pre>
- * FIELDNAME NOTES
- * c_magic[6] The string 070701 for new ASCII, the string 070702 for new ASCII with CRC
- * c_ino[8]
- * c_mode[8]
- * c_uid[8]
- * c_gid[8]
- * c_nlink[8]
- * c_mtim[8]
- * c_filesize[8] must be 0 for FIFOs and directories
- * c_maj[8]
- * c_min[8]
- * c_rmaj[8] only valid for chr and blk special files
- * c_rmin[8] only valid for chr and blk special files
- * c_namesize[8] count includes terminating NUL in pathname
- * c_check[8] 0 for "new" portable format; for CRC format
- * the sum of all the bytes in the file
- * </pre>
- *
- * <p><b>New ASCII Format</b> The "new" ASCII format uses 8-byte hexadecimal
- * fields for all numbers and separates device numbers into separate
- * fields for major and minor numbers.
- *
- * <p>The pathname is followed by NUL bytes so that the total size of
- * the fixed header plus pathname is a multiple of four. Likewise, the
- * file data is padded to a multiple of four bytes.
- *
- * <p>This class uses mutable fields and is not considered to be
- * threadsafe.
- *
- * <p>Based on code from the jRPM project (http://jrpm.sourceforge.net).
- *
- * <p>The MAGIC numbers and other constants are defined in {@link CpioConstants}
- *
- *
- * N.B. does not handle the cpio "tar" format
- *
- * @NotThreadSafe
- * @see https://people.freebsd.org/~kientzle/libarchive/man/cpio.5.txt
- */
-public class CpioArchiveEntry implements CpioConstants, ArchiveEntry {
-
- // Header description fields - should be same throughout an archive
-
- /**
- * See constructor documentation for possible values.
- */
- private final short fileFormat;
-
- /** The number of bytes in each header record; depends on the file format */
- private final int headerSize;
-
- /** The boundary to which the header and data elements are aligned: 0, 2 or 4 bytes */
- private final int alignmentBoundary;
-
- // Header fields
-
- private long chksum = 0;
-
- /** Number of bytes in the file */
- private long filesize = 0;
-
- private long gid = 0;
-
- private long inode = 0;
-
- private long maj = 0;
-
- private long min = 0;
-
- private long mode = 0;
-
- private long mtime = 0;
-
- private String name;
-
- private long nlink = 0;
-
- private long rmaj = 0;
-
- private long rmin = 0;
-
- private long uid = 0;
-
- /**
- * Creates a CpioArchiveEntry with a specified format.
- *
- * @param format
- * The cpio format for this entry.
- *
- */
- public CpioArchiveEntry(final short format) {
- switch (format) {
- case FORMAT_NEW:
- this.headerSize = 110;
- this.alignmentBoundary = 4;
- break;
- case FORMAT_NEW_CRC:
- this.headerSize = 110;
- this.alignmentBoundary = 4;
- break;
- case FORMAT_OLD_ASCII:
- this.headerSize = 76;
- this.alignmentBoundary = 0;
- break;
- case FORMAT_OLD_BINARY:
- this.headerSize = 26;
- this.alignmentBoundary = 2;
- break;
- default:
- throw new IllegalArgumentException("Unknown header type");
- }
- this.fileFormat = format;
- }
-
- /**
- * Creates a CpioArchiveEntry with a specified name. The format of
- * this entry will be the new format.
- *
- * @param name
- * The name of this entry.
- */
- public CpioArchiveEntry(final String name) {
- this(FORMAT_NEW, name);
- }
-
- /**
- * Creates a CpioArchiveEntry with a specified format and name.
- *
- * @param format
- * The cpio format for this entry.
- * @param name
- * The name of this entry.
- *
- *
- * @since 1.1
- */
- public CpioArchiveEntry(final short format, final String name) {
- this(format);
- this.name = name;
- }
-
- /**
- * Creates a CpioArchiveEntry with a specified name. The format of
- * this entry will be the new format.
- *
- * @param name
- * The name of this entry.
- * @param size
- * The size of this entry
- */
- public CpioArchiveEntry(final String name, final long size) {
- this(name);
- this.setSize(size);
- }
-
- /**
- * Creates a CpioArchiveEntry with a specified format, name and size.
- *
- * @param format
- * The cpio format for this entry.
- * @param name
- * The name of this entry.
- * @param size
- * The size of this entry
- *
- *
- * @since 1.1
- */
- public CpioArchiveEntry(final short format, final String name,
- final long size) {
- this(format, name);
- this.setSize(size);
- }
-
- /**
- * Creates a CpioArchiveEntry with a specified name for a
- * specified file. The format of this entry will be the new
- * format.
- *
- * @param inputFile
- * The file to gather information from.
- * @param entryName
- * The name of this entry.
- */
- public CpioArchiveEntry(final File inputFile, final String entryName) {
- this(FORMAT_NEW, inputFile, entryName);
- }
-
- /**
- * Creates a CpioArchiveEntry with a specified format and name for a
- * specified file.
- *
- * @param format
- * The cpio format for this entry.
- * @param inputFile
- * The file to gather information from.
- * @param entryName
- * The name of this entry.
- *
- *
- * @since 1.1
- */
- public CpioArchiveEntry(final short format, final File inputFile,
- final String entryName) {
- this(format, entryName, inputFile.isFile() ? inputFile.length() : 0);
- if (inputFile.isDirectory()){
- setMode(C_ISDIR);
- } else if (inputFile.isFile()){
- setMode(C_ISREG);
- } else {
- throw new IllegalArgumentException("Cannot determine type of file "
- + inputFile.getName());
- }
- // TODO set other fields as needed
- setTime(inputFile.lastModified() / 1000);
- }
-
- /**
- * Check if the method is allowed for the defined format.
- */
- private void checkNewFormat() {
- if ((this.fileFormat & FORMAT_NEW_MASK) == 0) {
- throw new UnsupportedOperationException();
- }
- }
-
- /**
- * Check if the method is allowed for the defined format.
- */
- private void checkOldFormat() {
- if ((this.fileFormat & FORMAT_OLD_MASK) == 0) {
- throw new UnsupportedOperationException();
- }
- }
-
- /**
- * Get the checksum.
- * Only supported for the new formats.
- *
- * @return Returns the checksum.
- * @throws UnsupportedOperationException if the format is not a new format
- */
- public long getChksum() {
- checkNewFormat();
- return this.chksum & 0xFFFFFFFFL;
- }
-
- /**
- * Get the device id.
- *
- * @return Returns the device id.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with a new
- * format.
- */
- public long getDevice() {
- checkOldFormat();
- return this.min;
- }
-
- /**
- * Get the major device id.
- *
- * @return Returns the major device id.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with an old
- * format.
- */
- public long getDeviceMaj() {
- checkNewFormat();
- return this.maj;
- }
-
- /**
- * Get the minor device id
- *
- * @return Returns the minor device id.
- * @throws UnsupportedOperationException if format is not a new format
- */
- public long getDeviceMin() {
- checkNewFormat();
- return this.min;
- }
-
- /**
- * Get the filesize.
- *
- * @return Returns the filesize.
- * @see org.apache.commons.compress.archivers.ArchiveEntry#getSize()
- */
- @Override
- public long getSize() {
- return this.filesize;
- }
-
- /**
- * Get the format for this entry.
- *
- * @return Returns the format.
- */
- public short getFormat() {
- return this.fileFormat;
- }
-
- /**
- * Get the group id.
- *
- * @return Returns the group id.
- */
- public long getGID() {
- return this.gid;
- }
-
- /**
- * Get the header size for this CPIO format
- *
- * @return Returns the header size in bytes.
- */
- public int getHeaderSize() {
- return this.headerSize;
- }
-
- /**
- * Get the alignment boundary for this CPIO format
- *
- * @return Returns the alignment boundary (0, 2, 4) in bytes
- */
- public int getAlignmentBoundary() {
- return this.alignmentBoundary;
- }
-
- /**
- * Get the number of bytes needed to pad the header to the alignment boundary.
- *
- * @return the number of bytes needed to pad the header (0,1,2,3)
- */
- public int getHeaderPadCount(){
- if (this.alignmentBoundary == 0) { return 0; }
- int size = this.headerSize + 1; // Name has terminating null
- if (name != null) {
- size += name.length();
- }
- final int remain = size % this.alignmentBoundary;
- if (remain > 0){
- return this.alignmentBoundary - remain;
- }
- return 0;
- }
-
- /**
- * Get the number of bytes needed to pad the data to the alignment boundary.
- *
- * @return the number of bytes needed to pad the data (0,1,2,3)
- */
- public int getDataPadCount(){
- if (this.alignmentBoundary == 0) { return 0; }
- final long size = this.filesize;
- final int remain = (int) (size % this.alignmentBoundary);
- if (remain > 0){
- return this.alignmentBoundary - remain;
- }
- return 0;
- }
-
- /**
- * Get the inode.
- *
- * @return Returns the inode.
- */
- public long getInode() {
- return this.inode;
- }
-
- /**
- * Get the mode of this entry (e.g. directory, regular file).
- *
- * @return Returns the mode.
- */
- public long getMode() {
- return mode == 0 && !CPIO_TRAILER.equals(name) ? C_ISREG : mode;
- }
-
- /**
- * Get the name.
- *
- *
This method returns the raw name as it is stored inside of the archive.
- *
- * @return Returns the name.
- */
- @Override
- public String getName() {
- return this.name;
- }
-
- /**
- * Get the number of links.
- *
- * @return Returns the number of links.
- */
- public long getNumberOfLinks() {
- return nlink == 0 ?
- isDirectory() ? 2 : 1
- : nlink;
- }
-
- /**
- * Get the remote device id.
- *
- * @return Returns the remote device id.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with a new
- * format.
- */
- public long getRemoteDevice() {
- checkOldFormat();
- return this.rmin;
- }
-
- /**
- * Get the remote major device id.
- *
- * @return Returns the remote major device id.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with an old
- * format.
- */
- public long getRemoteDeviceMaj() {
- checkNewFormat();
- return this.rmaj;
- }
-
- /**
- * Get the remote minor device id.
- *
- * @return Returns the remote minor device id.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with an old
- * format.
- */
- public long getRemoteDeviceMin() {
- checkNewFormat();
- return this.rmin;
- }
-
- /**
- * Get the time in seconds.
- *
- * @return Returns the time.
- */
- public long getTime() {
- return this.mtime;
- }
-
- @Override
- public Date getLastModifiedDate() {
- return new Date(1000 * getTime());
- }
-
- /**
- * Get the user id.
- *
- * @return Returns the user id.
- */
- public long getUID() {
- return this.uid;
- }
-
- /**
- * Check if this entry represents a block device.
- *
- * @return TRUE if this entry is a block device.
- */
- public boolean isBlockDevice() {
- return CpioUtil.fileType(mode) == C_ISBLK;
- }
-
- /**
- * Check if this entry represents a character device.
- *
- * @return TRUE if this entry is a character device.
- */
- public boolean isCharacterDevice() {
- return CpioUtil.fileType(mode) == C_ISCHR;
- }
-
- /**
- * Check if this entry represents a directory.
- *
- * @return TRUE if this entry is a directory.
- */
- @Override
- public boolean isDirectory() {
- return CpioUtil.fileType(mode) == C_ISDIR;
- }
-
- /**
- * Check if this entry represents a network device.
- *
- * @return TRUE if this entry is a network device.
- */
- public boolean isNetwork() {
- return CpioUtil.fileType(mode) == C_ISNWK;
- }
-
- /**
- * Check if this entry represents a pipe.
- *
- * @return TRUE if this entry is a pipe.
- */
- public boolean isPipe() {
- return CpioUtil.fileType(mode) == C_ISFIFO;
- }
-
- /**
- * Check if this entry represents a regular file.
- *
- * @return TRUE if this entry is a regular file.
- */
- public boolean isRegularFile() {
- return CpioUtil.fileType(mode) == C_ISREG;
- }
-
- /**
- * Check if this entry represents a socket.
- *
- * @return TRUE if this entry is a socket.
- */
- public boolean isSocket() {
- return CpioUtil.fileType(mode) == C_ISSOCK;
- }
-
- /**
- * Check if this entry represents a symbolic link.
- *
- * @return TRUE if this entry is a symbolic link.
- */
- public boolean isSymbolicLink() {
- return CpioUtil.fileType(mode) == C_ISLNK;
- }
-
- /**
- * Set the checksum. The checksum is calculated by adding all bytes of a
- * file to transfer (crc += buf[pos] & 0xFF).
- *
- * @param chksum
- * The checksum to set.
- */
- public void setChksum(final long chksum) {
- checkNewFormat();
- this.chksum = chksum & 0xFFFFFFFFL;
- }
-
- /**
- * Set the device id.
- *
- * @param device
- * The device id to set.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with a new
- * format.
- */
- public void setDevice(final long device) {
- checkOldFormat();
- this.min = device;
- }
-
- /**
- * Set major device id.
- *
- * @param maj
- * The major device id to set.
- */
- public void setDeviceMaj(final long maj) {
- checkNewFormat();
- this.maj = maj;
- }
-
- /**
- * Set the minor device id
- *
- * @param min
- * The minor device id to set.
- */
- public void setDeviceMin(final long min) {
- checkNewFormat();
- this.min = min;
- }
-
- /**
- * Set the filesize.
- *
- * @param size
- * The filesize to set.
- */
- public void setSize(final long size) {
- if (size < 0 || size > 0xFFFFFFFFL) {
- throw new IllegalArgumentException("invalid entry size <" + size
- + ">");
- }
- this.filesize = size;
- }
-
- /**
- * Set the group id.
- *
- * @param gid
- * The group id to set.
- */
- public void setGID(final long gid) {
- this.gid = gid;
- }
-
- /**
- * Set the inode.
- *
- * @param inode
- * The inode to set.
- */
- public void setInode(final long inode) {
- this.inode = inode;
- }
-
- /**
- * Set the mode of this entry (e.g. directory, regular file).
- *
- * @param mode
- * The mode to set.
- */
- public void setMode(final long mode) {
- final long maskedMode = mode & S_IFMT;
- switch ((int) maskedMode) {
- case C_ISDIR:
- case C_ISLNK:
- case C_ISREG:
- case C_ISFIFO:
- case C_ISCHR:
- case C_ISBLK:
- case C_ISSOCK:
- case C_ISNWK:
- break;
- default:
- throw new IllegalArgumentException(
- "Unknown mode. "
- + "Full: " + Long.toHexString(mode)
- + " Masked: " + Long.toHexString(maskedMode));
- }
-
- this.mode = mode;
- }
-
- /**
- * Set the name.
- *
- * @param name
- * The name to set.
- */
- public void setName(final String name) {
- this.name = name;
- }
-
- /**
- * Set the number of links.
- *
- * @param nlink
- * The number of links to set.
- */
- public void setNumberOfLinks(final long nlink) {
- this.nlink = nlink;
- }
-
- /**
- * Set the remote device id.
- *
- * @param device
- * The remote device id to set.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with a new
- * format.
- */
- public void setRemoteDevice(final long device) {
- checkOldFormat();
- this.rmin = device;
- }
-
- /**
- * Set the remote major device id.
- *
- * @param rmaj
- * The remote major device id to set.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with an old
- * format.
- */
- public void setRemoteDeviceMaj(final long rmaj) {
- checkNewFormat();
- this.rmaj = rmaj;
- }
-
- /**
- * Set the remote minor device id.
- *
- * @param rmin
- * The remote minor device id to set.
- * @throws UnsupportedOperationException
- * if this method is called for a CpioArchiveEntry with an old
- * format.
- */
- public void setRemoteDeviceMin(final long rmin) {
- checkNewFormat();
- this.rmin = rmin;
- }
-
- /**
- * Set the time in seconds.
- *
- * @param time
- * The time to set.
- */
- public void setTime(final long time) {
- this.mtime = time;
- }
-
- /**
- * Set the user id.
- *
- * @param uid
- * The user id to set.
- */
- public void setUID(final long uid) {
- this.uid = uid;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + (name == null ? 0 : name.hashCode());
- return result;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#equals(java.lang.Object)
- */
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
- final CpioArchiveEntry other = (CpioArchiveEntry) obj;
- if (name == null) {
- return other.name == null;
- } else {
- return name.equals(other.name);
- }
- }
-}
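
Aside: two calculations documented in the deleted entry class are easy to get wrong. A sketch (the class and file names are hypothetical) of the FORMAT_NEW_CRC byte-sum checksum described at setChksum, and of the new-format 4-byte padding queries:

import org.apache.commons.compress.archivers.cpio.CpioArchiveEntry;
import org.apache.commons.compress.archivers.cpio.CpioConstants;

public class CpioEntryDemo {
    // The FORMAT_NEW_CRC checksum is the plain unsigned sum of all data
    // bytes, kept to 32 bits (this mirrors the setChksum javadoc above).
    static long byteSumChecksum(final byte[] data) {
        long crc = 0;
        for (final byte b : data) {
            crc = (crc + (b & 0xFF)) & 0xFFFFFFFFL;
        }
        return crc;
    }

    public static void main(final String[] args) {
        final byte[] contents = "12345".getBytes();
        final CpioArchiveEntry entry = new CpioArchiveEntry(
                CpioConstants.FORMAT_NEW_CRC, "demo.txt", contents.length);
        entry.setChksum(byteSumChecksum(contents));
        // New-format headers and data are padded with NULs to a 4-byte
        // boundary; the entry reports how many pad bytes each part needs.
        System.out.println("header pad bytes: " + entry.getHeaderPadCount());
        System.out.println("data pad bytes:   " + entry.getDataPadCount());
    }
}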
diff --git a/src/org/apache/commons/compress/archivers/cpio/CpioArchiveInputStream.java b/src/org/apache/commons/compress/archivers/cpio/CpioArchiveInputStream.java
deleted file mode 100644
index ad8e125a9b2..00000000000
--- a/src/org/apache/commons/compress/archivers/cpio/CpioArchiveInputStream.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.cpio;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
-import org.apache.commons.compress.utils.ArchiveUtils;
-import org.apache.commons.compress.utils.CharsetNames;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * CpioArchiveInputStream is a stream for reading cpio streams. All formats of
- * cpio are supported (old ascii, old binary, new portable format and the new
- * portable format with crc).
- *
- *
- * The stream can be read by extracting a cpio entry (containing all
- * information about an entry) and afterwards reading from the stream the file
- * specified by the entry.
- *
- * Note: This implementation should be compatible with cpio 2.5
- *
- * <p>This class uses mutable fields and is not considered to be threadsafe.
- *
- * <p>Based on code from the jRPM project (jrpm.sourceforge.net)
- */
-
-public class CpioArchiveInputStream extends ArchiveInputStream implements
- CpioConstants {
-
- private boolean closed = false;
-
- private CpioArchiveEntry entry;
-
- private long entryBytesRead = 0;
-
- private boolean entryEOF = false;
-
- private final byte tmpbuf[] = new byte[4096];
-
- private long crc = 0;
-
- private final InputStream in;
-
- // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
- private final byte[] twoBytesBuf = new byte[2];
- private final byte[] fourBytesBuf = new byte[4];
- private final byte[] sixBytesBuf = new byte[6];
-
- private final int blockSize;
-
- /**
- * The encoding to use for filenames and labels.
- */
- private final ZipEncoding zipEncoding;
-
- // the provided encoding (for unit tests)
- final String encoding;
-
- /**
- * Construct the cpio input stream with a blocksize of {@link
- * CpioConstants#BLOCK_SIZE BLOCK_SIZE} and expecting ASCII file
- * names.
- *
- * @param in
- * The cpio stream
- */
- public CpioArchiveInputStream(final InputStream in) {
- this(in, BLOCK_SIZE, CharsetNames.US_ASCII);
- }
-
- /**
- * Construct the cpio input stream with a blocksize of {@link
- * CpioConstants#BLOCK_SIZE BLOCK_SIZE}.
- *
- * @param in
- * The cpio stream
- * @param encoding
- * The encoding of file names to expect - use null for
- * the platform's default.
- * @since 1.6
- */
- public CpioArchiveInputStream(final InputStream in, final String encoding) {
- this(in, BLOCK_SIZE, encoding);
- }
-
- /**
- * Construct the cpio input stream with the given block size, expecting
- * ASCII file names.
- *
- * @param in
- * The cpio stream
- * @param blockSize
- * The block size of the archive.
- * @since 1.5
- */
- public CpioArchiveInputStream(final InputStream in, final int blockSize) {
- this(in, blockSize, CharsetNames.US_ASCII);
- }
-
- /**
- * Construct the cpio input stream with the given block size and encoding.
- *
- * @param in
- * The cpio stream
- * @param blockSize
- * The block size of the archive.
- * @param encoding
- * The encoding of file names to expect - use null for
- * the platform's default.
- * @since 1.6
- */
- public CpioArchiveInputStream(final InputStream in, final int blockSize, final String encoding) {
- this.in = in;
- this.blockSize = blockSize;
- this.encoding = encoding;
- this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
- }
-
- /**
- * Returns 0 after EOF has been reached for the current entry data, otherwise
- * always returns 1.
- *
- * Programs should not count on this method to return the actual number of
- * bytes that could be read without blocking.
- *
- * @return 1 before EOF and 0 after EOF has been reached for the current entry.
- * @throws IOException
- * if an I/O error has occurred or if a CPIO file error has
- * occurred
- */
- @Override
- public int available() throws IOException {
- ensureOpen();
- if (this.entryEOF) {
- return 0;
- }
- return 1;
- }
-
- /**
- * Closes the CPIO input stream.
- *
- * @throws IOException
- * if an I/O error has occurred
- */
- @Override
- public void close() throws IOException {
- if (!this.closed) {
- in.close();
- this.closed = true;
- }
- }
-
- /**
- * Closes the current CPIO entry and positions the stream for reading the
- * next entry.
- *
- * @throws IOException
- * if an I/O error has occurred or if a CPIO file error has
- * occurred
- */
- private void closeEntry() throws IOException {
- // the skip implementation of this class will not skip more
- // than Integer.MAX_VALUE bytes
- while (skip((long) Integer.MAX_VALUE) == Integer.MAX_VALUE) { // NOPMD
- // do nothing
- }
- }
-
- /**
- * Check to make sure that this stream has not been closed
- *
- * @throws IOException
- * if the stream is already closed
- */
- private void ensureOpen() throws IOException {
- if (this.closed) {
- throw new IOException("Stream closed");
- }
- }
-
- /**
- * Reads the next CPIO file entry and positions stream at the beginning of
- * the entry data.
- *
- * @return the CpioArchiveEntry just read
- * @throws IOException
- * if an I/O error has occurred or if a CPIO file error has
- * occurred
- */
- public CpioArchiveEntry getNextCPIOEntry() throws IOException {
- ensureOpen();
- if (this.entry != null) {
- closeEntry();
- }
- readFully(twoBytesBuf, 0, twoBytesBuf.length);
- if (CpioUtil.byteArray2long(twoBytesBuf, false) == MAGIC_OLD_BINARY) {
- this.entry = readOldBinaryEntry(false);
- } else if (CpioUtil.byteArray2long(twoBytesBuf, true)
- == MAGIC_OLD_BINARY) {
- this.entry = readOldBinaryEntry(true);
- } else {
- System.arraycopy(twoBytesBuf, 0, sixBytesBuf, 0,
- twoBytesBuf.length);
- readFully(sixBytesBuf, twoBytesBuf.length,
- fourBytesBuf.length);
- final String magicString = ArchiveUtils.toAsciiString(sixBytesBuf);
- switch (magicString) {
- case MAGIC_NEW:
- this.entry = readNewEntry(false);
- break;
- case MAGIC_NEW_CRC:
- this.entry = readNewEntry(true);
- break;
- case MAGIC_OLD_ASCII:
- this.entry = readOldAsciiEntry();
- break;
- default:
- throw new IOException("Unknown magic [" + magicString + "]. Occured at byte: " + getBytesRead());
- }
- }
-
- this.entryBytesRead = 0;
- this.entryEOF = false;
- this.crc = 0;
-
- if (this.entry.getName().equals(CPIO_TRAILER)) {
- this.entryEOF = true;
- skipRemainderOfLastBlock();
- return null;
- }
- return this.entry;
- }
-
- private void skip(final int bytes) throws IOException{
- // bytes cannot be more than 3 bytes
- if (bytes > 0) {
- readFully(fourBytesBuf, 0, bytes);
- }
- }
-
- /**
- * Reads from the current CPIO entry into an array of bytes. Blocks until
- * some input is available.
- *
- * @param b
- * the buffer into which the data is read
- * @param off
- * the start offset of the data
- * @param len
- * the maximum number of bytes read
- * @return the actual number of bytes read, or -1 if the end of the entry is
- * reached
- * @throws IOException
- * if an I/O error has occurred or if a CPIO file error has
- * occurred
- */
- @Override
- public int read(final byte[] b, final int off, final int len)
- throws IOException {
- ensureOpen();
- if (off < 0 || len < 0 || off > b.length - len) {
- throw new IndexOutOfBoundsException();
- } else if (len == 0) {
- return 0;
- }
-
- if (this.entry == null || this.entryEOF) {
- return -1;
- }
- if (this.entryBytesRead == this.entry.getSize()) {
- skip(entry.getDataPadCount());
- this.entryEOF = true;
- if (this.entry.getFormat() == FORMAT_NEW_CRC
- && this.crc != this.entry.getChksum()) {
- throw new IOException("CRC Error. Occured at byte: "
- + getBytesRead());
- }
- return -1; // EOF for this entry
- }
- final int tmplength = (int) Math.min(len, this.entry.getSize()
- - this.entryBytesRead);
- if (tmplength < 0) {
- return -1;
- }
-
- final int tmpread = readFully(b, off, tmplength);
- if (this.entry.getFormat() == FORMAT_NEW_CRC) {
- for (int pos = 0; pos < tmpread; pos++) {
- this.crc += b[pos] & 0xFF;
- this.crc &= 0xFFFFFFFFL;
- }
- }
- this.entryBytesRead += tmpread;
-
- return tmpread;
- }
-
- private final int readFully(final byte[] b, final int off, final int len)
- throws IOException {
- final int count = IOUtils.readFully(in, b, off, len);
- count(count);
- if (count < len) {
- throw new EOFException();
- }
- return count;
- }
-
- private long readBinaryLong(final int length, final boolean swapHalfWord)
- throws IOException {
- final byte tmp[] = new byte[length];
- readFully(tmp, 0, tmp.length);
- return CpioUtil.byteArray2long(tmp, swapHalfWord);
- }
-
- private long readAsciiLong(final int length, final int radix)
- throws IOException {
- final byte tmpBuffer[] = new byte[length];
- readFully(tmpBuffer, 0, tmpBuffer.length);
- return Long.parseLong(ArchiveUtils.toAsciiString(tmpBuffer), radix);
- }
-
- private CpioArchiveEntry readNewEntry(final boolean hasCrc)
- throws IOException {
- CpioArchiveEntry ret;
- if (hasCrc) {
- ret = new CpioArchiveEntry(FORMAT_NEW_CRC);
- } else {
- ret = new CpioArchiveEntry(FORMAT_NEW);
- }
-
- ret.setInode(readAsciiLong(8, 16));
- final long mode = readAsciiLong(8, 16);
- if (CpioUtil.fileType(mode) != 0){ // mode is initialised to 0
- ret.setMode(mode);
- }
- ret.setUID(readAsciiLong(8, 16));
- ret.setGID(readAsciiLong(8, 16));
- ret.setNumberOfLinks(readAsciiLong(8, 16));
- ret.setTime(readAsciiLong(8, 16));
- ret.setSize(readAsciiLong(8, 16));
- ret.setDeviceMaj(readAsciiLong(8, 16));
- ret.setDeviceMin(readAsciiLong(8, 16));
- ret.setRemoteDeviceMaj(readAsciiLong(8, 16));
- ret.setRemoteDeviceMin(readAsciiLong(8, 16));
- final long namesize = readAsciiLong(8, 16);
- ret.setChksum(readAsciiLong(8, 16));
- final String name = readCString((int) namesize);
- ret.setName(name);
- if (CpioUtil.fileType(mode) == 0 && !name.equals(CPIO_TRAILER)){
- throw new IOException("Mode 0 only allowed in the trailer. Found entry name: "
- + ArchiveUtils.sanitize(name)
- + " Occured at byte: " + getBytesRead());
- }
- skip(ret.getHeaderPadCount());
-
- return ret;
- }
-
- private CpioArchiveEntry readOldAsciiEntry() throws IOException {
- final CpioArchiveEntry ret = new CpioArchiveEntry(FORMAT_OLD_ASCII);
-
- ret.setDevice(readAsciiLong(6, 8));
- ret.setInode(readAsciiLong(6, 8));
- final long mode = readAsciiLong(6, 8);
- if (CpioUtil.fileType(mode) != 0) {
- ret.setMode(mode);
- }
- ret.setUID(readAsciiLong(6, 8));
- ret.setGID(readAsciiLong(6, 8));
- ret.setNumberOfLinks(readAsciiLong(6, 8));
- ret.setRemoteDevice(readAsciiLong(6, 8));
- ret.setTime(readAsciiLong(11, 8));
- final long namesize = readAsciiLong(6, 8);
- ret.setSize(readAsciiLong(11, 8));
- final String name = readCString((int) namesize);
- ret.setName(name);
- if (CpioUtil.fileType(mode) == 0 && !name.equals(CPIO_TRAILER)){
- throw new IOException("Mode 0 only allowed in the trailer. Found entry: "
- + ArchiveUtils.sanitize(name)
- + " Occured at byte: " + getBytesRead());
- }
-
- return ret;
- }
-
- private CpioArchiveEntry readOldBinaryEntry(final boolean swapHalfWord)
- throws IOException {
- final CpioArchiveEntry ret = new CpioArchiveEntry(FORMAT_OLD_BINARY);
-
- ret.setDevice(readBinaryLong(2, swapHalfWord));
- ret.setInode(readBinaryLong(2, swapHalfWord));
- final long mode = readBinaryLong(2, swapHalfWord);
- if (CpioUtil.fileType(mode) != 0){
- ret.setMode(mode);
- }
- ret.setUID(readBinaryLong(2, swapHalfWord));
- ret.setGID(readBinaryLong(2, swapHalfWord));
- ret.setNumberOfLinks(readBinaryLong(2, swapHalfWord));
- ret.setRemoteDevice(readBinaryLong(2, swapHalfWord));
- ret.setTime(readBinaryLong(4, swapHalfWord));
- final long namesize = readBinaryLong(2, swapHalfWord);
- ret.setSize(readBinaryLong(4, swapHalfWord));
- final String name = readCString((int) namesize);
- ret.setName(name);
- if (CpioUtil.fileType(mode) == 0 && !name.equals(CPIO_TRAILER)){
- throw new IOException("Mode 0 only allowed in the trailer. Found entry: "
- + ArchiveUtils.sanitize(name)
- + "Occured at byte: " + getBytesRead());
- }
- skip(ret.getHeaderPadCount());
-
- return ret;
- }
-
- private String readCString(final int length) throws IOException {
- // don't include trailing NUL in file name to decode
- final byte tmpBuffer[] = new byte[length - 1];
- readFully(tmpBuffer, 0, tmpBuffer.length);
- this.in.read();
- return zipEncoding.decode(tmpBuffer);
- }
-
- /**
- * Skips specified number of bytes in the current CPIO entry.
- *
- * @param n
- * the number of bytes to skip
- * @return the actual number of bytes skipped
- * @throws IOException
- * if an I/O error has occurred
- * @throws IllegalArgumentException
- * if n < 0
- */
- @Override
- public long skip(final long n) throws IOException {
- if (n < 0) {
- throw new IllegalArgumentException("negative skip length");
- }
- ensureOpen();
- final int max = (int) Math.min(n, Integer.MAX_VALUE);
- int total = 0;
-
- while (total < max) {
- int len = max - total;
- if (len > this.tmpbuf.length) {
- len = this.tmpbuf.length;
- }
- len = read(this.tmpbuf, 0, len);
- if (len == -1) {
- this.entryEOF = true;
- break;
- }
- total += len;
- }
- return total;
- }
-
- @Override
- public ArchiveEntry getNextEntry() throws IOException {
- return getNextCPIOEntry();
- }
-
- /**
- * Skips the padding zeros written after the TRAILER!!! entry.
- */
- private void skipRemainderOfLastBlock() throws IOException {
- final long readFromLastBlock = getBytesRead() % blockSize;
- long remainingBytes = readFromLastBlock == 0 ? 0
- : blockSize - readFromLastBlock;
- while (remainingBytes > 0) {
- final long skipped = skip(blockSize - readFromLastBlock);
- if (skipped <= 0) {
- break;
- }
- remainingBytes -= skipped;
- }
- }
-
- /**
- * Checks if the signature matches one of the following magic values:
- *
- * Strings:
- *
- * "070701" - MAGIC_NEW
- * "070702" - MAGIC_NEW_CRC
- * "070707" - MAGIC_OLD_ASCII
- *
- * Octal Binary value:
- *
- * 070707 - MAGIC_OLD_BINARY (held as a short) = 0x71C7 or 0xC771
- * @param signature data to match
- * @param length length of data
- * @return whether the buffer seems to contain CPIO data
- */
- public static boolean matches(final byte[] signature, final int length) {
- if (length < 6) {
- return false;
- }
-
- // Check binary values
- if (signature[0] == 0x71 && (signature[1] & 0xFF) == 0xc7) {
- return true;
- }
- if (signature[1] == 0x71 && (signature[0] & 0xFF) == 0xc7) {
- return true;
- }
-
- // Check Ascii (String) values
- // 3037 3037 30nn
- if (signature[0] != 0x30) {
- return false;
- }
- if (signature[1] != 0x37) {
- return false;
- }
- if (signature[2] != 0x30) {
- return false;
- }
- if (signature[3] != 0x37) {
- return false;
- }
- if (signature[4] != 0x30) {
- return false;
- }
- // Check last byte
- if (signature[5] == 0x31) {
- return true;
- }
- if (signature[5] == 0x32) {
- return true;
- }
- if (signature[5] == 0x37) {
- return true;
- }
-
- return false;
- }
-}
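
Aside: the static matches method above supports cheap format sniffing before committing to a parse. A usage sketch (the path is a placeholder) that probes the first six bytes, then lists entries:

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.compress.archivers.cpio.CpioArchiveEntry;
import org.apache.commons.compress.archivers.cpio.CpioArchiveInputStream;

public class CpioListing {
    public static void main(final String[] args) throws IOException {
        // "test.cpio" is a placeholder path for this sketch.
        try (InputStream in = new BufferedInputStream(new FileInputStream("test.cpio"))) {
            // Probe the first six bytes against the known magic values.
            final byte[] signature = new byte[6];
            in.mark(signature.length);
            final int read = in.read(signature);
            in.reset();
            if (read < signature.length || !CpioArchiveInputStream.matches(signature, read)) {
                System.err.println("not a cpio archive");
                return;
            }
            try (CpioArchiveInputStream cpio = new CpioArchiveInputStream(in)) {
                CpioArchiveEntry entry;
                while ((entry = cpio.getNextCPIOEntry()) != null) {
                    System.out.println(entry.getName() + " (" + entry.getSize() + " bytes)");
                }
            }
        }
    }
}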
diff --git a/src/org/apache/commons/compress/archivers/cpio/CpioArchiveOutputStream.java b/src/org/apache/commons/compress/archivers/cpio/CpioArchiveOutputStream.java
deleted file mode 100644
index ed8e2d04910..00000000000
--- a/src/org/apache/commons/compress/archivers/cpio/CpioArchiveOutputStream.java
+++ /dev/null
@@ -1,563 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.cpio;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
-import org.apache.commons.compress.utils.ArchiveUtils;
-import org.apache.commons.compress.utils.CharsetNames;
-
-/**
- * CpioArchiveOutputStream is a stream for writing CPIO streams. All formats of
- * CPIO are supported (old ASCII, old binary, new portable format and the new
- * portable format with CRC).
- *
- *
An entry can be written by creating an instance of CpioArchiveEntry and fill
- * it with the necessary values and put it into the CPIO stream. Afterwards
- * write the contents of the file into the CPIO stream. Either close the stream
- * by calling finish() or put a next entry into the cpio stream.
- *
- *
- * CpioArchiveOutputStream out = new CpioArchiveOutputStream(
- * new FileOutputStream(new File("test.cpio")));
- * CpioArchiveEntry entry = new CpioArchiveEntry();
- * entry.setName("testfile");
- * String contents = "12345";
- * entry.setFileSize(contents.length());
- * entry.setMode(CpioConstants.C_ISREG); // regular file
- * ... set other attributes, e.g. time, number of links
- * out.putArchiveEntry(entry);
- * out.write(testContents.getBytes());
- * out.close();
- *
- *
- *
- * Note: This implementation should be compatible with cpio 2.5
- *
- *
- * This class uses mutable fields and is not considered thread-safe.
- *
- *
- * Based on code from the jRPM project (jrpm.sourceforge.net).
- */
-public class CpioArchiveOutputStream extends ArchiveOutputStream implements
- CpioConstants {
-
- private CpioArchiveEntry entry;
-
- private boolean closed = false;
-
- /** indicates if this archive is finished */
- private boolean finished;
-
- /**
- * See {@link CpioArchiveEntry#setFormat(short)} for possible values.
- */
- private final short entryFormat;
-
- private final HashMap<String, CpioArchiveEntry> names =
- new HashMap<>();
-
- private long crc = 0;
-
- private long written;
-
- private final OutputStream out;
-
- private final int blockSize;
-
- private long nextArtificalDeviceAndInode = 1;
-
- /**
- * The encoding to use for filenames and labels.
- */
- private final ZipEncoding zipEncoding;
-
- // the provided encoding (for unit tests)
- final String encoding;
-
- /**
- * Construct the cpio output stream with a specified format, a
- * blocksize of {@link CpioConstants#BLOCK_SIZE BLOCK_SIZE} and
- * using ASCII as the file name encoding.
- *
- * @param out
- * The cpio stream
- * @param format
- * The format of the stream
- */
- public CpioArchiveOutputStream(final OutputStream out, final short format) {
- this(out, format, BLOCK_SIZE, CharsetNames.US_ASCII);
- }
-
- /**
- * Construct the cpio output stream with a specified format using
- * ASCII as the file name encoding.
- *
- * @param out
- * The cpio stream
- * @param format
- * The format of the stream
- * @param blockSize
- * The block size of the archive.
- *
- * @since 1.1
- */
- public CpioArchiveOutputStream(final OutputStream out, final short format,
- final int blockSize) {
- this(out, format, blockSize, CharsetNames.US_ASCII);
- }
-
- /**
- * Construct the cpio output stream with a specified format using
- * ASCII as the file name encoding.
- *
- * @param out
- * The cpio stream
- * @param format
- * The format of the stream
- * @param blockSize
- * The block size of the archive.
- * @param encoding
- * The encoding of file names to write - use null for
- * the platform's default.
- *
- * @since 1.6
- */
- public CpioArchiveOutputStream(final OutputStream out, final short format,
- final int blockSize, final String encoding) {
- this.out = out;
- switch (format) {
- case FORMAT_NEW:
- case FORMAT_NEW_CRC:
- case FORMAT_OLD_ASCII:
- case FORMAT_OLD_BINARY:
- break;
- default:
- throw new IllegalArgumentException("Unknown format: "+format);
-
- }
- this.entryFormat = format;
- this.blockSize = blockSize;
- this.encoding = encoding;
- this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
- }
-
- /**
- * Construct the cpio output stream. The format for this CPIO stream is the
- * "new" format using ASCII encoding for file names
- *
- * @param out
- * The cpio stream
- */
- public CpioArchiveOutputStream(final OutputStream out) {
- this(out, FORMAT_NEW);
- }
-
- /**
- * Construct the cpio output stream. The format for this CPIO stream is the
- * "new" format.
- *
- * @param out
- * The cpio stream
- * @param encoding
- * The encoding of file names to write - use null for
- * the platform's default.
- * @since 1.6
- */
- public CpioArchiveOutputStream(final OutputStream out, final String encoding) {
- this(out, FORMAT_NEW, BLOCK_SIZE, encoding);
- }
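
The constructors above accept one of four FORMAT_* constants. A minimal
sketch, with the output file name purely illustrative, that creates an empty
archive in the old portable ASCII ("odc") format:

    import java.io.FileOutputStream;
    import java.io.IOException;
    import org.apache.commons.compress.archivers.cpio.CpioArchiveOutputStream;
    import org.apache.commons.compress.archivers.cpio.CpioConstants;

    public class OldAsciiCpio {
        public static void main(String[] args) throws IOException {
            // FORMAT_OLD_ASCII selects the portable ASCII format; FORMAT_NEW,
            // FORMAT_NEW_CRC and FORMAT_OLD_BINARY are the other accepted values.
            try (CpioArchiveOutputStream out = new CpioArchiveOutputStream(
                    new FileOutputStream("legacy.cpio"), CpioConstants.FORMAT_OLD_ASCII)) {
                out.finish(); // writes only the TRAILER!!! record plus block padding
            }
        }
    }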
-
- /**
- * Check to make sure that this stream has not been closed
- *
- * @throws IOException
- * if the stream is already closed
- */
- private void ensureOpen() throws IOException {
- if (this.closed) {
- throw new IOException("Stream closed");
- }
- }
-
- /**
- * Begins writing a new CPIO file entry and positions the stream to the
- * start of the entry data. Closes the current entry if still active. The
- * current time will be used if the entry has no set modification time and
- * the default header format will be used if no other format is specified in
- * the entry.
- *
- * @param entry
- * the CPIO cpioEntry to be written
- * @throws IOException
- * if an I/O error has occurred or if a CPIO file error has
- * occurred
- * @throws ClassCastException if entry is not an instance of CpioArchiveEntry
- */
- @Override
- public void putArchiveEntry(final ArchiveEntry entry) throws IOException {
- if(finished) {
- throw new IOException("Stream has already been finished");
- }
-
- final CpioArchiveEntry e = (CpioArchiveEntry) entry;
- ensureOpen();
- if (this.entry != null) {
- closeArchiveEntry(); // close previous entry
- }
- if (e.getTime() == -1) {
- e.setTime(System.currentTimeMillis() / 1000);
- }
-
- final short format = e.getFormat();
- if (format != this.entryFormat){
- throw new IOException("Header format: "+format+" does not match existing format: "+this.entryFormat);
- }
-
- if (this.names.put(e.getName(), e) != null) {
- throw new IOException("duplicate entry: " + e.getName());
- }
-
- writeHeader(e);
- this.entry = e;
- this.written = 0;
- }
-
- private void writeHeader(final CpioArchiveEntry e) throws IOException {
- switch (e.getFormat()) {
- case FORMAT_NEW:
- out.write(ArchiveUtils.toAsciiBytes(MAGIC_NEW));
- count(6);
- writeNewEntry(e);
- break;
- case FORMAT_NEW_CRC:
- out.write(ArchiveUtils.toAsciiBytes(MAGIC_NEW_CRC));
- count(6);
- writeNewEntry(e);
- break;
- case FORMAT_OLD_ASCII:
- out.write(ArchiveUtils.toAsciiBytes(MAGIC_OLD_ASCII));
- count(6);
- writeOldAsciiEntry(e);
- break;
- case FORMAT_OLD_BINARY:
- final boolean swapHalfWord = true;
- writeBinaryLong(MAGIC_OLD_BINARY, 2, swapHalfWord);
- writeOldBinaryEntry(e, swapHalfWord);
- break;
- default:
- throw new IOException("unknown format " + e.getFormat());
- }
- }
-
- private void writeNewEntry(final CpioArchiveEntry entry) throws IOException {
- long inode = entry.getInode();
- long devMin = entry.getDeviceMin();
- if (CPIO_TRAILER.equals(entry.getName())) {
- inode = devMin = 0;
- } else {
- if (inode == 0 && devMin == 0) {
- inode = nextArtificalDeviceAndInode & 0xFFFFFFFF;
- devMin = (nextArtificalDeviceAndInode++ >> 32) & 0xFFFFFFFF;
- } else {
- nextArtificalDeviceAndInode =
- Math.max(nextArtificalDeviceAndInode,
- inode + 0x100000000L * devMin) + 1;
- }
- }
-
- writeAsciiLong(inode, 8, 16);
- writeAsciiLong(entry.getMode(), 8, 16);
- writeAsciiLong(entry.getUID(), 8, 16);
- writeAsciiLong(entry.getGID(), 8, 16);
- writeAsciiLong(entry.getNumberOfLinks(), 8, 16);
- writeAsciiLong(entry.getTime(), 8, 16);
- writeAsciiLong(entry.getSize(), 8, 16);
- writeAsciiLong(entry.getDeviceMaj(), 8, 16);
- writeAsciiLong(devMin, 8, 16);
- writeAsciiLong(entry.getRemoteDeviceMaj(), 8, 16);
- writeAsciiLong(entry.getRemoteDeviceMin(), 8, 16);
- writeAsciiLong(entry.getName().length() + 1L, 8, 16);
- writeAsciiLong(entry.getChksum(), 8, 16);
- writeCString(entry.getName());
- pad(entry.getHeaderPadCount());
- }
-
- private void writeOldAsciiEntry(final CpioArchiveEntry entry)
- throws IOException {
- long inode = entry.getInode();
- long device = entry.getDevice();
- if (CPIO_TRAILER.equals(entry.getName())) {
- inode = device = 0;
- } else {
- if (inode == 0 && device == 0) {
- inode = nextArtificalDeviceAndInode & 0777777;
- device = (nextArtificalDeviceAndInode++ >> 18) & 0777777;
- } else {
- nextArtificalDeviceAndInode =
- Math.max(nextArtificalDeviceAndInode,
- inode + 01000000 * device) + 1;
- }
- }
-
- writeAsciiLong(device, 6, 8);
- writeAsciiLong(inode, 6, 8);
- writeAsciiLong(entry.getMode(), 6, 8);
- writeAsciiLong(entry.getUID(), 6, 8);
- writeAsciiLong(entry.getGID(), 6, 8);
- writeAsciiLong(entry.getNumberOfLinks(), 6, 8);
- writeAsciiLong(entry.getRemoteDevice(), 6, 8);
- writeAsciiLong(entry.getTime(), 11, 8);
- writeAsciiLong(entry.getName().length() + 1L, 6, 8);
- writeAsciiLong(entry.getSize(), 11, 8);
- writeCString(entry.getName());
- }
-
- private void writeOldBinaryEntry(final CpioArchiveEntry entry,
- final boolean swapHalfWord) throws IOException {
- long inode = entry.getInode();
- long device = entry.getDevice();
- if (CPIO_TRAILER.equals(entry.getName())) {
- inode = device = 0;
- } else {
- if (inode == 0 && device == 0) {
- inode = nextArtificalDeviceAndInode & 0xFFFF;
- device = (nextArtificalDeviceAndInode++ >> 16) & 0xFFFF;
- } else {
- nextArtificalDeviceAndInode =
- Math.max(nextArtificalDeviceAndInode,
- inode + 0x10000 * device) + 1;
- }
- }
-
- writeBinaryLong(device, 2, swapHalfWord);
- writeBinaryLong(inode, 2, swapHalfWord);
- writeBinaryLong(entry.getMode(), 2, swapHalfWord);
- writeBinaryLong(entry.getUID(), 2, swapHalfWord);
- writeBinaryLong(entry.getGID(), 2, swapHalfWord);
- writeBinaryLong(entry.getNumberOfLinks(), 2, swapHalfWord);
- writeBinaryLong(entry.getRemoteDevice(), 2, swapHalfWord);
- writeBinaryLong(entry.getTime(), 4, swapHalfWord);
- writeBinaryLong(entry.getName().length() + 1L, 2, swapHalfWord);
- writeBinaryLong(entry.getSize(), 4, swapHalfWord);
- writeCString(entry.getName());
- pad(entry.getHeaderPadCount());
- }
-
- /*(non-Javadoc)
- *
- * @see
- * org.apache.commons.compress.archivers.ArchiveOutputStream#closeArchiveEntry
- * ()
- */
- @Override
- public void closeArchiveEntry() throws IOException {
- if(finished) {
- throw new IOException("Stream has already been finished");
- }
-
- ensureOpen();
-
- if (entry == null) {
- throw new IOException("Trying to close non-existent entry");
- }
-
- if (this.entry.getSize() != this.written) {
- throw new IOException("invalid entry size (expected "
- + this.entry.getSize() + " but got " + this.written
- + " bytes)");
- }
- pad(this.entry.getDataPadCount());
- if (this.entry.getFormat() == FORMAT_NEW_CRC
- && this.crc != this.entry.getChksum()) {
- throw new IOException("CRC Error");
- }
- this.entry = null;
- this.crc = 0;
- this.written = 0;
- }
-
- /**
- * Writes an array of bytes to the current CPIO entry data. This method will
- * block until all the bytes are written.
- *
- * @param b
- * the data to be written
- * @param off
- * the start offset in the data
- * @param len
- * the number of bytes that are written
- * @throws IOException
- * if an I/O error has occurred or if a CPIO file error has
- * occurred
- */
- @Override
- public void write(final byte[] b, final int off, final int len)
- throws IOException {
- ensureOpen();
- if (off < 0 || len < 0 || off > b.length - len) {
- throw new IndexOutOfBoundsException();
- } else if (len == 0) {
- return;
- }
-
- if (this.entry == null) {
- throw new IOException("no current CPIO entry");
- }
- if (this.written + len > this.entry.getSize()) {
- throw new IOException("attempt to write past end of STORED entry");
- }
- out.write(b, off, len);
- this.written += len;
- if (this.entry.getFormat() == FORMAT_NEW_CRC) {
- for (int pos = 0; pos < len; pos++) {
- this.crc += b[pos] & 0xFF;
- this.crc &= 0xFFFFFFFFL;
- }
- }
- count(len);
- }
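
For FORMAT_NEW_CRC the "checksum" is simply the 32-bit sum of all data bytes,
as the loop above shows. A hypothetical standalone version of that
accumulation:

    static long cpioChecksum(byte[] data) {
        long crc = 0;
        for (byte b : data) {
            crc += b & 0xFF;    // unsigned byte value
            crc &= 0xFFFFFFFFL; // keep only the low 32 bits
        }
        return crc;
    }
    // cpioChecksum("12345".getBytes(java.nio.charset.StandardCharsets.US_ASCII)) == 255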
-
- /**
- * Finishes writing the contents of the CPIO output stream without closing
- * the underlying stream. Use this method when applying multiple filters in
- * succession to the same output stream.
- *
- * @throws IOException
- * if an I/O exception has occurred or if a CPIO file error has
- * occurred
- */
- @Override
- public void finish() throws IOException {
- ensureOpen();
- if (finished) {
- throw new IOException("This archive has already been finished");
- }
-
- if (this.entry != null) {
- throw new IOException("This archive contains unclosed entries.");
- }
- this.entry = new CpioArchiveEntry(this.entryFormat);
- this.entry.setName(CPIO_TRAILER);
- this.entry.setNumberOfLinks(1);
- writeHeader(this.entry);
- closeArchiveEntry();
-
- final int lengthOfLastBlock = (int) (getBytesWritten() % blockSize);
- if (lengthOfLastBlock != 0) {
- pad(blockSize - lengthOfLastBlock);
- }
-
- finished = true;
- }
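
The block padding at the end of finish() is easy to check in isolation. The
helper below is a hypothetical standalone rewrite of that arithmetic, not part
of the deleted class:

    // An archive is padded with NUL bytes so that its total length is a
    // whole multiple of the block size (512 bytes by default).
    static int trailingPad(long bytesWritten, int blockSize) {
        int lengthOfLastBlock = (int) (bytesWritten % blockSize);
        return lengthOfLastBlock == 0 ? 0 : blockSize - lengthOfLastBlock;
    }
    // trailingPad(1000, 512) == 24; trailingPad(1024, 512) == 0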
-
- /**
- * Closes the CPIO output stream as well as the stream being filtered.
- *
- * @throws IOException
- * if an I/O error has occurred or if a CPIO file error has
- * occurred
- */
- @Override
- public void close() throws IOException {
- if(!finished) {
- finish();
- }
-
- if (!this.closed) {
- out.close();
- this.closed = true;
- }
- }
-
- private void pad(final int count) throws IOException{
- if (count > 0){
- final byte buff[] = new byte[count];
- out.write(buff);
- count(count);
- }
- }
-
- private void writeBinaryLong(final long number, final int length,
- final boolean swapHalfWord) throws IOException {
- final byte tmp[] = CpioUtil.long2byteArray(number, length, swapHalfWord);
- out.write(tmp);
- count(tmp.length);
- }
-
- private void writeAsciiLong(final long number, final int length,
- final int radix) throws IOException {
- final StringBuilder tmp = new StringBuilder();
- String tmpStr;
- if (radix == 16) {
- tmp.append(Long.toHexString(number));
- } else if (radix == 8) {
- tmp.append(Long.toOctalString(number));
- } else {
- tmp.append(Long.toString(number));
- }
-
- if (tmp.length() <= length) {
- final int insertLength = length - tmp.length();
- for (int pos = 0; pos < insertLength; pos++) {
- tmp.insert(0, "0");
- }
- tmpStr = tmp.toString();
- } else {
- tmpStr = tmp.substring(tmp.length() - length);
- }
- final byte[] b = ArchiveUtils.toAsciiBytes(tmpStr);
- out.write(b);
- count(b.length);
- }
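
writeAsciiLong() renders each header field at a fixed width: the value is
printed in the requested radix, left-padded with '0', and truncated to its
least significant digits if it would overflow the field. A hypothetical
standalone sketch of the same formatting, assuming non-negative inputs:

    static String asciiField(long number, int length, int radix) {
        String s = Long.toString(number, radix); // "1ff" for 511 in hex
        if (s.length() <= length) {
            StringBuilder sb = new StringBuilder();
            for (int i = s.length(); i < length; i++) {
                sb.append('0'); // left-pad to the fixed field width
            }
            return sb.append(s).toString();
        }
        return s.substring(s.length() - length); // keep the low-order digits
    }
    // asciiField(511, 8, 16) == "000001ff"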
-
- /**
- * Writes an ASCII string to the stream followed by \0
- * @param str the String to write
- * @throws IOException if the string couldn't be written
- */
- private void writeCString(final String str) throws IOException {
- final ByteBuffer buf = zipEncoding.encode(str);
- final int len = buf.limit() - buf.position();
- out.write(buf.array(), buf.arrayOffset(), len);
- out.write('\0');
- count(len + 1);
- }
-
- /**
- * Creates a new ArchiveEntry. The entryName must be an ASCII encoded string.
- *
- * @see org.apache.commons.compress.archivers.ArchiveOutputStream#createArchiveEntry(java.io.File, java.lang.String)
- */
- @Override
- public ArchiveEntry createArchiveEntry(final File inputFile, final String entryName)
- throws IOException {
- if(finished) {
- throw new IOException("Stream has already been finished");
- }
- return new CpioArchiveEntry(inputFile, entryName);
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/cpio/CpioConstants.java b/src/org/apache/commons/compress/archivers/cpio/CpioConstants.java
deleted file mode 100644
index efba28251ba..00000000000
--- a/src/org/apache/commons/compress/archivers/cpio/CpioConstants.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.cpio;
-
-/**
- * All constants needed by CPIO.
- *
- * based on code from the jRPM project (jrpm.sourceforge.net)
- *
- */
-public interface CpioConstants {
- /** magic number of a cpio entry in the new format */
- String MAGIC_NEW = "070701";
-
- /** magic number of a cpio entry in the new format with crc */
- String MAGIC_NEW_CRC = "070702";
-
- /** magic number of a cpio entry in the old ascii format */
- String MAGIC_OLD_ASCII = "070707";
-
- /** magic number of a cpio entry in the old binary format */
- int MAGIC_OLD_BINARY = 070707;
-
- // These FORMAT_ constants are internal to the code
-
- /** write/read a CpioArchiveEntry in the new format */
- short FORMAT_NEW = 1;
-
- /** write/read a CpioArchiveEntry in the new format with crc */
- short FORMAT_NEW_CRC = 2;
-
- /** write/read a CpioArchiveEntry in the old ascii format */
- short FORMAT_OLD_ASCII = 4;
-
- /** write/read a CpioArchiveEntry in the old binary format */
- short FORMAT_OLD_BINARY = 8;
-
- /** Mask for both new formats */
- short FORMAT_NEW_MASK = 3;
-
- /** Mask for both old formats */
- short FORMAT_OLD_MASK = 12;
-
- /*
- * Constants for the MODE bits
- */
-
- /** Mask for all file type bits. */
- int S_IFMT = 0170000;
-
- // http://www.opengroup.org/onlinepubs/9699919799/basedefs/cpio.h.html
- // has a list of the C_xxx constants
-
- /** Defines a socket */
- int C_ISSOCK = 0140000;
-
- /** Defines a symbolic link */
- int C_ISLNK = 0120000;
-
- /** HP/UX network special (C_ISCTG) */
- int C_ISNWK = 0110000;
-
- /** Defines a regular file */
- int C_ISREG = 0100000;
-
- /** Defines a block device */
- int C_ISBLK = 0060000;
-
- /** Defines a directory */
- int C_ISDIR = 0040000;
-
- /** Defines a character device */
- int C_ISCHR = 0020000;
-
- /** Defines a pipe */
- int C_ISFIFO = 0010000;
-
-
- /** Set user ID */
- int C_ISUID = 0004000;
-
- /** Set group ID */
- int C_ISGID = 0002000;
-
- /** On directories, restricted deletion flag. */
- int C_ISVTX = 0001000;
-
-
- /** Permits the owner of a file to read the file */
- int C_IRUSR = 0000400;
-
- /** Permits the owner of a file to write to the file */
- int C_IWUSR = 0000200;
-
- /** Permits the owner of a file to execute the file or to search the directory */
- int C_IXUSR = 0000100;
-
-
- /** Permits a file's group to read the file */
- int C_IRGRP = 0000040;
-
- /** Permits a file's group to write to the file */
- int C_IWGRP = 0000020;
-
- /** Permits a file's group to execute the file or to search the directory */
- int C_IXGRP = 0000010;
-
-
- /** Permits others to read the file */
- int C_IROTH = 0000004;
-
- /** Permits others to write to the file */
- int C_IWOTH = 0000002;
-
- /** Permits others to execute the file or to search the directory */
- int C_IXOTH = 0000001;
-
- /** The special trailer marker */
- String CPIO_TRAILER = "TRAILER!!!";
-
- /**
- * The default block size.
- *
- * @since 1.1
- */
- int BLOCK_SIZE = 512;
-}
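
The C_* mode constants above combine exactly like a Unix st_mode value: one
file type constant OR-ed with any number of permission bits. A small sketch
(entry name illustrative):

    import org.apache.commons.compress.archivers.cpio.CpioArchiveEntry;
    import org.apache.commons.compress.archivers.cpio.CpioConstants;

    public class ModeBitsDemo {
        public static void main(String[] args) {
            CpioArchiveEntry entry = new CpioArchiveEntry("bin/tool");
            // Regular file with rwxr-xr-x permissions.
            entry.setMode(CpioConstants.C_ISREG
                    | CpioConstants.C_IRUSR | CpioConstants.C_IWUSR | CpioConstants.C_IXUSR
                    | CpioConstants.C_IRGRP | CpioConstants.C_IXGRP
                    | CpioConstants.C_IROTH | CpioConstants.C_IXOTH);
            System.out.println(Long.toOctalString(entry.getMode())); // 100755
        }
    }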
diff --git a/src/org/apache/commons/compress/archivers/cpio/CpioUtil.java b/src/org/apache/commons/compress/archivers/cpio/CpioUtil.java
deleted file mode 100644
index f53ea4424d1..00000000000
--- a/src/org/apache/commons/compress/archivers/cpio/CpioUtil.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.cpio;
-
-/**
- * Package private utility class for Cpio
- *
- * @Immutable
- */
-class CpioUtil {
-
- /**
- * Extracts the file type bits from a mode.
- */
- static long fileType(final long mode) {
- return mode & CpioConstants.S_IFMT;
- }
-
- /**
- * Converts a byte array to a long. Halfwords can be swapped by setting
- * swapHalfWord=true.
- *
- * @param number
- * An array of bytes containing a number
- * @param swapHalfWord
- * Swap halfwords ([0][1][2][3]->[1][0][3][2])
- * @return The long value
- * @throws UnsupportedOperationException if number length is not a multiple of 2
- */
- static long byteArray2long(final byte[] number, final boolean swapHalfWord) {
- if (number.length % 2 != 0) {
- throw new UnsupportedOperationException();
- }
-
- long ret = 0;
- int pos = 0;
- final byte tmp_number[] = new byte[number.length];
- System.arraycopy(number, 0, tmp_number, 0, number.length);
-
- if (!swapHalfWord) {
- byte tmp = 0;
- for (pos = 0; pos < tmp_number.length; pos++) {
- tmp = tmp_number[pos];
- tmp_number[pos++] = tmp_number[pos];
- tmp_number[pos] = tmp;
- }
- }
-
- ret = tmp_number[0] & 0xFF;
- for (pos = 1; pos < tmp_number.length; pos++) {
- ret <<= 8;
- ret |= tmp_number[pos] & 0xFF;
- }
- return ret;
- }
-
- /**
- * Converts a long number to a byte array
- * Halfwords can be swapped by setting swapHalfWord=true.
- *
- * @param number
- * the input long number to be converted
- *
- * @param length
- * The length of the returned array
- * @param swapHalfWord
- * Swap halfwords ([0][1][2][3]->[1][0][3][2])
- * @return The byte array
- * @throws UnsupportedOperationException if the length is not a positive multiple of two
- */
- static byte[] long2byteArray(final long number, final int length,
- final boolean swapHalfWord) {
- final byte[] ret = new byte[length];
- int pos = 0;
- long tmp_number = 0;
-
- if (length % 2 != 0 || length < 2) {
- throw new UnsupportedOperationException();
- }
-
- tmp_number = number;
- for (pos = length - 1; pos >= 0; pos--) {
- ret[pos] = (byte) (tmp_number & 0xFF);
- tmp_number >>= 8;
- }
-
- if (!swapHalfWord) {
- byte tmp = 0;
- for (pos = 0; pos < length; pos++) {
- tmp = ret[pos];
- ret[pos++] = ret[pos];
- ret[pos] = tmp;
- }
- }
-
- return ret;
- }
-}
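
CpioUtil is package-private, so its swap behaviour cannot be exercised from
outside the package; the hypothetical helper below merely illustrates the
halfword exchange both conversions perform:

    // Bytes are exchanged pairwise ([0][1][2][3] -> [1][0][3][2]), which is
    // its own inverse: applying the swap twice restores the input.
    static byte[] swapHalfWords(byte[] in) {
        byte[] out = in.clone();
        for (int i = 0; i + 1 < out.length; i += 2) {
            byte tmp = out[i];
            out[i] = out[i + 1];
            out[i + 1] = tmp;
        }
        return out;
    }
    // swapHalfWords(new byte[] {0, 1, 2, 3}) -> {1, 0, 3, 2}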
diff --git a/src/org/apache/commons/compress/archivers/cpio/package.html b/src/org/apache/commons/compress/archivers/cpio/package.html
deleted file mode 100644
index 985828725ec..00000000000
--- a/src/org/apache/commons/compress/archivers/cpio/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
- Provides stream classes for reading and writing archives using
- the CPIO format.
-
-
diff --git a/src/org/apache/commons/compress/archivers/dump/Dirent.java b/src/org/apache/commons/compress/archivers/dump/Dirent.java
deleted file mode 100644
index b5af964a381..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/Dirent.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-/**
- * Directory entry.
- */
-class Dirent {
- private final int ino;
- private final int parentIno;
- private final int type;
- private final String name;
-
- /**
- * Constructor
- *
- * @param ino
- * @param parentIno
- * @param type
- * @param name
- */
- Dirent(final int ino, final int parentIno, final int type, final String name) {
- this.ino = ino;
- this.parentIno = parentIno;
- this.type = type;
- this.name = name;
- }
-
- /**
- * Get ino.
- * @return the i-node
- */
- int getIno() {
- return ino;
- }
-
- /**
- * Get ino of parent directory.
- * @return the parent i-node
- */
- int getParentIno() {
- return parentIno;
- }
-
- /**
- * Get entry type.
- * @return the entry type
- */
- int getType() {
- return type;
- }
-
- /**
- * Get name of directory entry.
- *
- *
- * This method returns the raw name as it is stored inside of the archive.
- *
- * @return the directory name
- */
- String getName() {
- return name;
- }
-
- /**
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return String.format("[%d]: %s", ino, name);
- }
-}
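
The parent-ino field is what makes path reconstruction possible: given an
ino -> entry map, a full path is rebuilt by walking towards the root, which by
convention is ino 2 and is its own parent. A hypothetical sketch using a
stand-in record type of the same shape as Dirent:

    import java.util.HashMap;
    import java.util.Map;

    public class PathRebuildDemo {
        record Entry(int ino, int parentIno, String name) {}

        static String path(Map<Integer, Entry> byIno, int ino) {
            Entry e = byIno.get(ino);
            if (e == null) {
                return null; // gap: an ancestor has not been read yet
            }
            if (e.ino() == e.parentIno()) {
                return e.name(); // the root entry "." is its own parent
            }
            String parent = path(byIno, e.parentIno());
            return parent == null ? null : parent + "/" + e.name();
        }

        public static void main(String[] args) {
            Map<Integer, Entry> byIno = new HashMap<>();
            byIno.put(2, new Entry(2, 2, "."));
            byIno.put(5, new Entry(5, 2, "etc"));
            byIno.put(9, new Entry(9, 5, "passwd"));
            System.out.println(path(byIno, 9)); // ./etc/passwd
        }
    }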
diff --git a/src/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java b/src/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java
deleted file mode 100644
index 2430f4aa2a6..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/DumpArchiveConstants.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-/**
- * Various constants associated with dump archives.
- */
-public final class DumpArchiveConstants {
- public static final int TP_SIZE = 1024;
- public static final int NTREC = 10;
- public static final int HIGH_DENSITY_NTREC = 32;
- public static final int OFS_MAGIC = 60011;
- public static final int NFS_MAGIC = 60012;
- public static final int FS_UFS2_MAGIC = 0x19540119;
- public static final int CHECKSUM = 84446;
- public static final int LBLSIZE = 16;
- public static final int NAMELEN = 64;
-
- /* do not instantiate */
- private DumpArchiveConstants() {
- }
-
- /**
- * The type of tape segment.
- */
- public enum SEGMENT_TYPE {
- TAPE(1),
- INODE(2),
- BITS(3),
- ADDR(4),
- END(5),
- CLRI(6);
-
- int code;
-
- SEGMENT_TYPE(final int code) {
- this.code = code;
- }
-
- public static SEGMENT_TYPE find(final int code) {
- for (final SEGMENT_TYPE t : values()) {
- if (t.code == code) {
- return t;
- }
- }
-
- return null;
- }
- }
-
- /**
- * The type of compression.
- */
- public enum COMPRESSION_TYPE {
- ZLIB(0),
- BZLIB(1),
- LZO(2);
-
- int code;
-
- COMPRESSION_TYPE(final int code) {
- this.code = code;
- }
-
- public static COMPRESSION_TYPE find(final int code) {
- for (final COMPRESSION_TYPE t : values()) {
- if (t.code == code) {
- return t;
- }
- }
-
- return null;
- }
- }
-}
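
Both find() methods above map the raw integer code read from tape back to an
enum constant and return null for anything unrecognised:

    import org.apache.commons.compress.archivers.dump.DumpArchiveConstants;

    public class SegmentTypeDemo {
        public static void main(String[] args) {
            System.out.println(DumpArchiveConstants.SEGMENT_TYPE.find(5));  // END
            System.out.println(DumpArchiveConstants.SEGMENT_TYPE.find(99)); // null
        }
    }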
diff --git a/src/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java b/src/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java
deleted file mode 100644
index e284505f251..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/DumpArchiveEntry.java
+++ /dev/null
@@ -1,845 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-import java.util.Collections;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.Set;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * This class represents an entry in a Dump archive. It consists
- * of the entry's header, the entry's File and any extended attributes.
- *
- * DumpEntries that are created from the header bytes read from
- * an archive are instantiated with the DumpArchiveEntry( byte[] )
- * constructor. These entries will be used when extracting from
- * or listing the contents of an archive. These entries have their
- * header filled in using the header bytes. They also set the File
- * to null, since they reference an archive entry not a file.
- *
- * DumpEntries can also be constructed from nothing but a name.
- * This allows the programmer to construct the entry by hand, for
- * instance when only an InputStream is available for writing to
- * the archive, and the header information is constructed from
- * other information. In this case the header fields are set to
- * defaults and the File is set to null.
- *
- *
- * The C structure for a Dump Entry's header is:
- *
- * #define TP_BSIZE 1024 // size of each file block
- * #define NTREC 10 // number of blocks to write at once
- * #define HIGHDENSITYTREC 32 // number of blocks to write on high-density tapes
- * #define TP_NINDIR (TP_BSIZE/2) // number of indirect inodes in record
- * #define TP_NINOS (TP_NINDIR / sizeof (int32_t))
- * #define LBLSIZE 16
- * #define NAMELEN 64
- *
- * #define OFS_MAGIC (int)60011 // old format magic value
- * #define NFS_MAGIC (int)60012 // new format magic value
- * #define FS_UFS2_MAGIC (int)0x19540119
- * #define CHECKSUM (int)84446 // constant used in checksum algorithm
- *
- * struct s_spcl {
- * int32_t c_type; // record type (see below)
- * int32_t c_date; // date of this dump
- * int32_t c_ddate; // date of previous dump
- * int32_t c_volume; // dump volume number
- * u_int32_t c_tapea; // logical block of this record
- * dump_ino_t c_ino; // number of inode
- * int32_t c_magic; // magic number (see above)
- * int32_t c_checksum; // record checksum
- * #ifdef __linux__
- * struct new_bsd_inode c_dinode;
- * #else
- * #ifdef sunos
- * struct new_bsd_inode c_dinode;
- * #else
- * struct dinode c_dinode; // ownership and mode of inode
- * #endif
- * #endif
- * int32_t c_count; // number of valid c_addr entries
- * union u_data c_data; // see above
- * char c_label[LBLSIZE]; // dump label
- * int32_t c_level; // level of this dump
- * char c_filesys[NAMELEN]; // name of dumped file system
- * char c_dev[NAMELEN]; // name of dumped device
- * char c_host[NAMELEN]; // name of dumped host
- * int32_t c_flags; // additional information (see below)
- * int32_t c_firstrec; // first record on volume
- * int32_t c_ntrec; // blocksize on volume
- * int32_t c_extattributes; // additional inode info (see below)
- * int32_t c_spare[30]; // reserved for future uses
- * } s_spcl;
- *
- * //
- * // flag values
- * //
- * #define DR_NEWHEADER 0x0001 // new format tape header
- * #define DR_NEWINODEFMT 0x0002 // new format inodes on tape
- * #define DR_COMPRESSED 0x0080 // dump tape is compressed
- * #define DR_METAONLY 0x0100 // only the metadata of the inode has been dumped
- * #define DR_INODEINFO 0x0002 // [SIC] TS_END header contains c_inos information
- * #define DR_EXTATTRIBUTES 0x8000
- *
- * //
- * // extattributes inode info
- * //
- * #define EXT_REGULAR 0
- * #define EXT_MACOSFNDRINFO 1
- * #define EXT_MACOSRESFORK 2
- * #define EXT_XATTR 3
- *
- * // used for EA on tape
- * #define EXT2_GOOD_OLD_INODE_SIZE 128
- * #define EXT2_XATTR_MAGIC 0xEA020000 // block EA
- * #define EXT2_XATTR_MAGIC2 0xEA020001 // in inode EA
- *
- *
- * The fields in bold are the same for all blocks. (This permitted
- * multiple dumps to be written to a single tape.)
- *
- *
- *
- * The C structure for the inode (file) information is:
- *
- * struct bsdtimeval { // **** alpha-*-linux is deviant
- * __u32 tv_sec;
- * __u32 tv_usec;
- * };
- *
- * #define NDADDR 12
- * #define NIADDR 3
- *
- * //
- * // This is the new (4.4) BSD inode structure
- * // copied from the FreeBSD 2.0 <ufs/ufs/dinode.h> include file
- * //
- * struct new_bsd_inode {
- * __u16 di_mode; // file type, standard Unix permissions
- * __s16 di_nlink; // number of hard links to file.
- * union {
- * __u16 oldids[2];
- * __u32 inumber;
- * } di_u;
- * u_quad_t di_size; // file size
- * struct bsdtimeval di_atime; // time file was last accessed
- * struct bsdtimeval di_mtime; // time file was last modified
- * struct bsdtimeval di_ctime; // time file was created
- * __u32 di_db[NDADDR];
- * __u32 di_ib[NIADDR];
- * __u32 di_flags; //
- * __s32 di_blocks; // number of disk blocks
- * __s32 di_gen; // generation number
- * __u32 di_uid; // user id (see /etc/passwd)
- * __u32 di_gid; // group id (see /etc/group)
- * __s32 di_spare[2]; // unused
- * };
- *
- *
- * It is important to note that the header DOES NOT have the name of the
- * file. It can't since hard links mean that you may have multiple filenames
- * for a single physical file. You must read the contents of the directory
- * entries to learn the mapping(s) from filename to inode.
- *
- *
- *
- * The C structure that indicates if a specific block is a real block
- * that contains data or is a sparse block that is not persisted to the
- * disk is:
- *
- * #define TP_BSIZE 1024
- * #define TP_NINDIR (TP_BSIZE/2)
- *
- * union u_data {
- * char s_addrs[TP_NINDIR]; // 1 => data; 0 => hole in inode
- * int32_t s_inos[TP_NINOS]; // table of first inode on each volume
- * } u_data;
- *
- *
- * @NotThreadSafe
- */
-public class DumpArchiveEntry implements ArchiveEntry {
- private String name;
- private TYPE type = TYPE.UNKNOWN;
- private int mode;
- private Set<PERMISSION> permissions = Collections.emptySet();
- private long size;
- private long atime;
- private long mtime;
- private int uid;
- private int gid;
-
- /**
- * Currently unused
- */
- private final DumpArchiveSummary summary = null;
-
- // this information is available from standard index.
- private final TapeSegmentHeader header = new TapeSegmentHeader();
- private String simpleName;
- private String originalName;
-
- // this information is available from QFA index
- private int volume;
- private long offset;
- private int ino;
- private int nlink;
- private long ctime;
- private int generation;
- private boolean isDeleted;
-
- /**
- * Default constructor.
- */
- public DumpArchiveEntry() {
- }
-
- /**
- * Constructor taking only filename.
- * @param name pathname
- * @param simpleName actual filename.
- */
- public DumpArchiveEntry(final String name, final String simpleName) {
- setName(name);
- this.simpleName = simpleName;
- }
-
- /**
- * Constructor taking name, inode and type.
- *
- * @param name the name
- * @param simpleName the simple name
- * @param ino the ino
- * @param type the type
- */
- protected DumpArchiveEntry(final String name, final String simpleName, final int ino,
- final TYPE type) {
- setType(type);
- setName(name);
- this.simpleName = simpleName;
- this.ino = ino;
- this.offset = 0;
- }
-
- /**
- * Returns the path of the entry.
- * @return the path of the entry.
- */
- public String getSimpleName() {
- return simpleName;
- }
-
- /**
- * Sets the path of the entry.
- * @param simpleName the simple name
- */
- protected void setSimpleName(final String simpleName) {
- this.simpleName = simpleName;
- }
-
- /**
- * Returns the ino of the entry.
- * @return the ino
- */
- public int getIno() {
- return header.getIno();
- }
-
- /**
- * Return the number of hard links to the entry.
- * @return the number of hard links
- */
- public int getNlink() {
- return nlink;
- }
-
- /**
- * Set the number of hard links.
- * @param nlink the number of hard links
- */
- public void setNlink(final int nlink) {
- this.nlink = nlink;
- }
-
- /**
- * Get file creation time.
- * @return the creation time
- */
- public Date getCreationTime() {
- return new Date(ctime);
- }
-
- /**
- * Set the file creation time.
- * @param ctime the creation time
- */
- public void setCreationTime(final Date ctime) {
- this.ctime = ctime.getTime();
- }
-
- /**
- * Return the generation of the file.
- * @return the generation
- */
- public int getGeneration() {
- return generation;
- }
-
- /**
- * Set the generation of the file.
- * @param generation the generation
- */
- public void setGeneration(final int generation) {
- this.generation = generation;
- }
-
- /**
- * Has this file been deleted? (Only valid on incremental dumps.)
- * @return whether the file has been deleted
- */
- public boolean isDeleted() {
- return isDeleted;
- }
-
- /**
- * Set whether this file has been deleted.
- * @param isDeleted whether the file has been deleted
- */
- public void setDeleted(final boolean isDeleted) {
- this.isDeleted = isDeleted;
- }
-
- /**
- * Return the offset within the archive
- * @return the offset
- */
- public long getOffset() {
- return offset;
- }
-
- /**
- * Set the offset within the archive.
- * @param offset the offset
- */
- public void setOffset(final long offset) {
- this.offset = offset;
- }
-
- /**
- * Return the tape volume where this file is located.
- * @return the volume
- */
- public int getVolume() {
- return volume;
- }
-
- /**
- * Set the tape volume.
- * @param volume the volume
- */
- public void setVolume(final int volume) {
- this.volume = volume;
- }
-
- /**
- * Return the type of the tape segment header.
- * @return the segment header
- */
- public DumpArchiveConstants.SEGMENT_TYPE getHeaderType() {
- return header.getType();
- }
-
- /**
- * Return the number of records in this segment.
- * @return the number of records
- */
- public int getHeaderCount() {
- return header.getCount();
- }
-
- /**
- * Return the number of sparse records in this segment.
- * @return the number of sparse records
- */
- public int getHeaderHoles() {
- return header.getHoles();
- }
-
- /**
- * Is this a sparse record?
- * @param idx index of the record to check
- * @return whether this is a sparse record
- */
- public boolean isSparseRecord(final int idx) {
- return (header.getCdata(idx) & 0x01) == 0;
- }
-
- @Override
- public int hashCode() {
- return ino;
- }
-
- @Override
- public boolean equals(final Object o) {
- if (o == this) {
- return true;
- } else if (o == null || !o.getClass().equals(getClass())) {
- return false;
- }
-
- final DumpArchiveEntry rhs = (DumpArchiveEntry) o;
-
- if (rhs.header == null) {
- return false;
- }
-
- if (ino != rhs.ino) {
- return false;
- }
-
- // summary is always null right now, but this may change some day
- if ((summary == null && rhs.summary != null) // NOSONAR
- || (summary != null && !summary.equals(rhs.summary))) { // NOSONAR
- return false;
- }
-
- return true;
- }
-
- @Override
- public String toString() {
- return getName();
- }
-
- /**
- * Populate the dump archive entry and tape segment header with
- * the contents of the buffer.
- *
- * @param buffer buffer to read content from
- */
- static DumpArchiveEntry parse(final byte[] buffer) {
- final DumpArchiveEntry entry = new DumpArchiveEntry();
- final TapeSegmentHeader header = entry.header;
-
- header.type = DumpArchiveConstants.SEGMENT_TYPE.find(DumpArchiveUtil.convert32(
- buffer, 0));
-
- //header.dumpDate = new Date(1000L * DumpArchiveUtil.convert32(buffer, 4));
- //header.previousDumpDate = new Date(1000L * DumpArchiveUtil.convert32(
- // buffer, 8));
- header.volume = DumpArchiveUtil.convert32(buffer, 12);
- //header.tapea = DumpArchiveUtil.convert32(buffer, 16);
- entry.ino = header.ino = DumpArchiveUtil.convert32(buffer, 20);
-
- //header.magic = DumpArchiveUtil.convert32(buffer, 24);
- //header.checksum = DumpArchiveUtil.convert32(buffer, 28);
- final int m = DumpArchiveUtil.convert16(buffer, 32);
-
- // determine the type of the file.
- entry.setType(TYPE.find((m >> 12) & 0x0F));
-
- // determine the standard permissions
- entry.setMode(m);
-
- entry.nlink = DumpArchiveUtil.convert16(buffer, 34);
- // inumber, oldids?
- entry.setSize(DumpArchiveUtil.convert64(buffer, 40));
-
- long t = (1000L * DumpArchiveUtil.convert32(buffer, 48)) +
- (DumpArchiveUtil.convert32(buffer, 52) / 1000);
- entry.setAccessTime(new Date(t));
- t = (1000L * DumpArchiveUtil.convert32(buffer, 56)) +
- (DumpArchiveUtil.convert32(buffer, 60) / 1000);
- entry.setLastModifiedDate(new Date(t));
- t = (1000L * DumpArchiveUtil.convert32(buffer, 64)) +
- (DumpArchiveUtil.convert32(buffer, 68) / 1000);
- entry.ctime = t;
-
- // db: 72-119 - direct blocks
- // id: 120-131 - indirect blocks
- //entry.flags = DumpArchiveUtil.convert32(buffer, 132);
- //entry.blocks = DumpArchiveUtil.convert32(buffer, 136);
- entry.generation = DumpArchiveUtil.convert32(buffer, 140);
- entry.setUserId(DumpArchiveUtil.convert32(buffer, 144));
- entry.setGroupId(DumpArchiveUtil.convert32(buffer, 148));
- // two 32-bit spare values.
- header.count = DumpArchiveUtil.convert32(buffer, 160);
-
- header.holes = 0;
-
- for (int i = 0; (i < 512) && (i < header.count); i++) {
- if (buffer[164 + i] == 0) {
- header.holes++;
- }
- }
-
- System.arraycopy(buffer, 164, header.cdata, 0, 512);
-
- entry.volume = header.getVolume();
-
- //entry.isSummaryOnly = false;
- return entry;
- }
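
parse() depends on helpers in DumpArchiveUtil to pull fixed-offset integers
out of the 1024-byte header. Dump headers store multi-byte values least
significant byte first; a hypothetical stand-in for the 32-bit conversion:

    static int convert32(byte[] buf, int offset) {
        return (buf[offset] & 0xFF)
                | (buf[offset + 1] & 0xFF) << 8
                | (buf[offset + 2] & 0xFF) << 16
                | (buf[offset + 3] & 0xFF) << 24;
    }
    // A header whose bytes 20..23 read 02 00 00 00 yields ino 2.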
-
- /**
- * Update entry with information from next tape segment header.
- */
- void update(final byte[] buffer) {
- header.volume = DumpArchiveUtil.convert32(buffer, 16);
- header.count = DumpArchiveUtil.convert32(buffer, 160);
-
- header.holes = 0;
-
- for (int i = 0; (i < 512) && (i < header.count); i++) {
- if (buffer[164 + i] == 0) {
- header.holes++;
- }
- }
-
- System.arraycopy(buffer, 164, header.cdata, 0, 512);
- }
-
- /**
- * Archive entry as stored on tape. There is one TSH for (at most)
- * every 512k in the file.
- */
- static class TapeSegmentHeader {
- private DumpArchiveConstants.SEGMENT_TYPE type;
- private int volume;
- private int ino;
- private int count;
- private int holes;
- private final byte[] cdata = new byte[512]; // map of any 'holes'
-
- public DumpArchiveConstants.SEGMENT_TYPE getType() {
- return type;
- }
-
- public int getVolume() {
- return volume;
- }
-
- public int getIno() {
- return ino;
- }
-
- void setIno(final int ino) {
- this.ino = ino;
- }
-
- public int getCount() {
- return count;
- }
-
- public int getHoles() {
- return holes;
- }
-
- public int getCdata(final int idx) {
- return cdata[idx];
- }
- }
-
- /**
- * Returns the name of the entry.
- *
- *
- * This method returns the raw name as it is stored inside of the archive.
- *
- * @return the name of the entry.
- */
- @Override
- public String getName() {
- return name;
- }
-
- /**
- * Returns the unmodified name of the entry.
- * @return the name of the entry.
- */
- String getOriginalName() {
- return originalName;
- }
-
- /**
- * Sets the name of the entry.
- * @param name the name
- */
- public final void setName(String name) {
- this.originalName = name;
- if (name != null) {
- if (isDirectory() && !name.endsWith("/")) {
- name += "/";
- }
- if (name.startsWith("./")) {
- name = name.substring(2);
- }
- }
- this.name = name;
- }
-
- /**
- * The last modified date.
- * @return the last modified date
- */
- @Override
- public Date getLastModifiedDate() {
- return new Date(mtime);
- }
-
- /**
- * Is this a directory?
- * @return whether this is a directory
- */
- @Override
- public boolean isDirectory() {
- return type == TYPE.DIRECTORY;
- }
-
- /**
- * Is this a regular file?
- * @return whether this is a regular file
- */
- public boolean isFile() {
- return type == TYPE.FILE;
- }
-
- /**
- * Is this a socket?
- * @return whether this is a socket
- */
- public boolean isSocket() {
- return type == TYPE.SOCKET;
- }
-
- /**
- * Is this a character device?
- * @return whether this is a character device
- */
- public boolean isChrDev() {
- return type == TYPE.CHRDEV;
- }
-
- /**
- * Is this a block device?
- * @return whether this is a block device
- */
- public boolean isBlkDev() {
- return type == TYPE.BLKDEV;
- }
-
- /**
- * Is this a fifo/pipe?
- * @return whether this is a fifo
- */
- public boolean isFifo() {
- return type == TYPE.FIFO;
- }
-
- /**
- * Get the type of the entry.
- * @return the type
- */
- public TYPE getType() {
- return type;
- }
-
- /**
- * Set the type of the entry.
- * @param type the type
- */
- public void setType(final TYPE type) {
- this.type = type;
- }
-
- /**
- * Return the access permissions on the entry.
- * @return the access permissions
- */
- public int getMode() {
- return mode;
- }
-
- /**
- * Set the access permissions on the entry.
- * @param mode the access permissions
- */
- public void setMode(final int mode) {
- this.mode = mode & 07777;
- this.permissions = PERMISSION.find(mode);
- }
-
- /**
- * Returns the permissions on the entry.
- * @return the permissions
- */
- public Set<PERMISSION> getPermissions() {
- return permissions;
- }
-
- /**
- * Returns the size of the entry.
- * @return the size
- */
- @Override
- public long getSize() {
- return isDirectory() ? SIZE_UNKNOWN : size;
- }
-
- /**
- * Returns the size of the entry as read from the archive.
- */
- long getEntrySize() {
- return size;
- }
-
- /**
- * Set the size of the entry.
- * @param size the size
- */
- public void setSize(final long size) {
- this.size = size;
- }
-
- /**
- * Set the time the file was last modified.
- * @param mtime the last modified time
- */
- public void setLastModifiedDate(final Date mtime) {
- this.mtime = mtime.getTime();
- }
-
- /**
- * Returns the time the file was last accessed.
- * @return the access time
- */
- public Date getAccessTime() {
- return new Date(atime);
- }
-
- /**
- * Set the time the file was last accessed.
- * @param atime the access time
- */
- public void setAccessTime(final Date atime) {
- this.atime = atime.getTime();
- }
-
- /**
- * Return the user id.
- * @return the user id
- */
- public int getUserId() {
- return uid;
- }
-
- /**
- * Set the user id.
- * @param uid the user id
- */
- public void setUserId(final int uid) {
- this.uid = uid;
- }
-
- /**
- * Return the group id
- * @return the group id
- */
- public int getGroupId() {
- return gid;
- }
-
- /**
- * Set the group id.
- * @param gid the group id
- */
- public void setGroupId(final int gid) {
- this.gid = gid;
- }
-
- public enum TYPE {
- WHITEOUT(14),
- SOCKET(12),
- LINK(10),
- FILE(8),
- BLKDEV(6),
- DIRECTORY(4),
- CHRDEV(2),
- FIFO(1),
- UNKNOWN(15);
-
- private int code;
-
- TYPE(final int code) {
- this.code = code;
- }
-
- public static TYPE find(final int code) {
- TYPE type = UNKNOWN;
-
- for (final TYPE t : TYPE.values()) {
- if (code == t.code) {
- type = t;
- }
- }
-
- return type;
- }
- }
-
- public enum PERMISSION {
- SETUID(04000),
- SETGUI(02000),
- STICKY(01000),
- USER_READ(00400),
- USER_WRITE(00200),
- USER_EXEC(00100),
- GROUP_READ(00040),
- GROUP_WRITE(00020),
- GROUP_EXEC(00010),
- WORLD_READ(00004),
- WORLD_WRITE(00002),
- WORLD_EXEC(00001);
-
- private int code;
-
- PERMISSION(final int code) {
- this.code = code;
- }
-
- public static Set<PERMISSION> find(final int code) {
- final Set<PERMISSION> set = new HashSet<>();
-
- for (final PERMISSION p : PERMISSION.values()) {
- if ((code & p.code) == p.code) {
- set.add(p);
- }
- }
-
- if (set.isEmpty()) {
- return Collections.emptySet();
- }
-
- return EnumSet.copyOf(set);
- }
- }
-}
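
PERMISSION.find() decodes a numeric mode into the matching set of constants;
for example 0755 maps to owner rwx, group r-x, world r-x:

    import java.util.Set;
    import org.apache.commons.compress.archivers.dump.DumpArchiveEntry;

    public class PermissionDemo {
        public static void main(String[] args) {
            Set<DumpArchiveEntry.PERMISSION> perms = DumpArchiveEntry.PERMISSION.find(0755);
            System.out.println(perms.contains(DumpArchiveEntry.PERMISSION.USER_EXEC));   // true
            System.out.println(perms.contains(DumpArchiveEntry.PERMISSION.WORLD_WRITE)); // false
        }
    }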
diff --git a/src/org/apache/commons/compress/archivers/dump/DumpArchiveException.java b/src/org/apache/commons/compress/archivers/dump/DumpArchiveException.java
deleted file mode 100644
index 635b1d9fbf0..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/DumpArchiveException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-import java.io.IOException;
-
-
-/**
- * Dump Archive Exception
- */
-public class DumpArchiveException extends IOException {
- private static final long serialVersionUID = 1L;
-
- public DumpArchiveException() {
- }
-
- public DumpArchiveException(final String msg) {
- super(msg);
- }
-
- public DumpArchiveException(final Throwable cause) {
- initCause(cause);
- }
-
- public DumpArchiveException(final String msg, final Throwable cause) {
- super(msg);
- initCause(cause);
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/dump/DumpArchiveInputStream.java b/src/org/apache/commons/compress/archivers/dump/DumpArchiveInputStream.java
deleted file mode 100644
index ed4f02fa085..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/DumpArchiveInputStream.java
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.PriorityQueue;
-import java.util.Queue;
-import java.util.Stack;
-
-/**
- * The DumpArchiveInputStream reads a UNIX dump archive as an InputStream.
- * Methods are provided to position at each successive entry in
- * the archive, and then read each entry as a normal input stream
- * using read().
- *
- * There doesn't seem to be a hint about the encoding of string values
- * in any piece of documentation. Given the main purpose of dump/restore
- * is backing up a system it seems very likely the format uses the
- * current default encoding of the system.
- *
- * @NotThreadSafe
- */
-public class DumpArchiveInputStream extends ArchiveInputStream {
- private DumpArchiveSummary summary;
- private DumpArchiveEntry active;
- private boolean isClosed;
- private boolean hasHitEOF;
- private long entrySize;
- private long entryOffset;
- private int readIdx;
- private final byte[] readBuf = new byte[DumpArchiveConstants.TP_SIZE];
- private byte[] blockBuffer;
- private int recordOffset;
- private long filepos;
- protected TapeInputStream raw;
-
- // map of ino -> dirent entry. We can use this to reconstruct full paths.
- private final Map<Integer, Dirent> names = new HashMap<>();
-
- // map of ino -> (directory) entry when we're missing one or more elements in the path.
- private final Map<Integer, DumpArchiveEntry> pending = new HashMap<>();
-
- // queue of (directory) entries where we now have the full path.
- private Queue<DumpArchiveEntry> queue;
-
- /**
- * The encoding to use for filenames and labels.
- */
- private final ZipEncoding zipEncoding;
-
- // the provided encoding (for unit tests)
- final String encoding;
-
- /**
- * Constructor using the platform's default encoding for file
- * names.
- *
- * @param is stream to read from
- * @throws ArchiveException on error
- */
- public DumpArchiveInputStream(final InputStream is) throws ArchiveException {
- this(is, null);
- }
-
- /**
- * Constructor.
- *
- * @param is stream to read from
- * @param encoding the encoding to use for file names, use null
- * for the platform's default encoding
- * @since 1.6
- * @throws ArchiveException on error
- */
- public DumpArchiveInputStream(final InputStream is, final String encoding)
- throws ArchiveException {
- this.raw = new TapeInputStream(is);
- this.hasHitEOF = false;
- this.encoding = encoding;
- this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
-
- try {
- // read header, verify it's a dump archive.
- final byte[] headerBytes = raw.readRecord();
-
- if (!DumpArchiveUtil.verify(headerBytes)) {
- throw new UnrecognizedFormatException();
- }
-
- // get summary information
- summary = new DumpArchiveSummary(headerBytes, this.zipEncoding);
-
- // reset buffer with actual block size.
- raw.resetBlockSize(summary.getNTRec(), summary.isCompressed());
-
- // allocate our read buffer.
- blockBuffer = new byte[4 * DumpArchiveConstants.TP_SIZE];
-
- // skip past CLRI and BITS segments since we don't handle them yet.
- readCLRI();
- readBITS();
- } catch (final IOException ex) {
- throw new ArchiveException(ex.getMessage(), ex);
- }
-
- // put in a dummy record for the root node.
- final Dirent root = new Dirent(2, 2, 4, ".");
- names.put(2, root);
-
- // use priority based on queue to ensure parent directories are
- // released first.
- queue = new PriorityQueue<>(10,
- new Comparator<DumpArchiveEntry>() {
- @Override
- public int compare(final DumpArchiveEntry p, final DumpArchiveEntry q) {
- if (p.getOriginalName() == null || q.getOriginalName() == null) {
- return Integer.MAX_VALUE;
- }
-
- return p.getOriginalName().compareTo(q.getOriginalName());
- }
- });
- }
-
- @Deprecated
- @Override
- public int getCount() {
- return (int) getBytesRead();
- }
-
- @Override
- public long getBytesRead() {
- return raw.getBytesRead();
- }
-
- /**
- * Return the archive summary information.
- * @return the summary
- */
- public DumpArchiveSummary getSummary() {
- return summary;
- }
-
- /**
- * Read CLRI (deleted inode) segment.
- */
- private void readCLRI() throws IOException {
- final byte[] buffer = raw.readRecord();
-
- if (!DumpArchiveUtil.verify(buffer)) {
- throw new InvalidFormatException();
- }
-
- active = DumpArchiveEntry.parse(buffer);
-
- if (DumpArchiveConstants.SEGMENT_TYPE.CLRI != active.getHeaderType()) {
- throw new InvalidFormatException();
- }
-
- // we don't do anything with this yet.
- if (raw.skip((long) DumpArchiveConstants.TP_SIZE * active.getHeaderCount())
- == -1) {
- throw new EOFException();
- }
- readIdx = active.getHeaderCount();
- }
-
- /**
- * Read BITS segment.
- */
- private void readBITS() throws IOException {
- final byte[] buffer = raw.readRecord();
-
- if (!DumpArchiveUtil.verify(buffer)) {
- throw new InvalidFormatException();
- }
-
- active = DumpArchiveEntry.parse(buffer);
-
- if (DumpArchiveConstants.SEGMENT_TYPE.BITS != active.getHeaderType()) {
- throw new InvalidFormatException();
- }
-
- // we don't do anything with this yet.
- if (raw.skip((long) DumpArchiveConstants.TP_SIZE * active.getHeaderCount())
- == -1) {
- throw new EOFException();
- }
- readIdx = active.getHeaderCount();
- }
-
- /**
- * Read the next entry.
- * @return the next entry
- * @throws IOException on error
- */
- public DumpArchiveEntry getNextDumpEntry() throws IOException {
- return getNextEntry();
- }
-
- @Override
- public DumpArchiveEntry getNextEntry() throws IOException {
- DumpArchiveEntry entry = null;
- String path = null;
-
- // is there anything in the queue?
- if (!queue.isEmpty()) {
- return queue.remove();
- }
-
- while (entry == null) {
- if (hasHitEOF) {
- return null;
- }
-
- // skip any remaining records in this segment for prior file.
- // we might still have holes... easiest to do it
- // block by block. We may want to revisit this if
- // the unnecessary decompression time adds up.
- while (readIdx < active.getHeaderCount()) {
- if (!active.isSparseRecord(readIdx++)
- && raw.skip(DumpArchiveConstants.TP_SIZE) == -1) {
- throw new EOFException();
- }
- }
-
- readIdx = 0;
- filepos = raw.getBytesRead();
-
- byte[] headerBytes = raw.readRecord();
-
- if (!DumpArchiveUtil.verify(headerBytes)) {
- throw new InvalidFormatException();
- }
-
- active = DumpArchiveEntry.parse(headerBytes);
-
- // skip any remaining segments for prior file.
- while (DumpArchiveConstants.SEGMENT_TYPE.ADDR == active.getHeaderType()) {
- if (raw.skip((long) DumpArchiveConstants.TP_SIZE
- * (active.getHeaderCount()
- - active.getHeaderHoles())) == -1) {
- throw new EOFException();
- }
-
- filepos = raw.getBytesRead();
- headerBytes = raw.readRecord();
-
- if (!DumpArchiveUtil.verify(headerBytes)) {
- throw new InvalidFormatException();
- }
-
- active = DumpArchiveEntry.parse(headerBytes);
- }
-
- // check if this is an end-of-volume marker.
- if (DumpArchiveConstants.SEGMENT_TYPE.END == active.getHeaderType()) {
- hasHitEOF = true;
-
- return null;
- }
-
- entry = active;
-
- if (entry.isDirectory()) {
- readDirectoryEntry(active);
-
- // now we create an empty InputStream.
- entryOffset = 0;
- entrySize = 0;
- readIdx = active.getHeaderCount();
- } else {
- entryOffset = 0;
- entrySize = active.getEntrySize();
- readIdx = 0;
- }
-
- recordOffset = readBuf.length;
-
- path = getPath(entry);
-
- if (path == null) {
- entry = null;
- }
- }
-
- entry.setName(path);
- entry.setSimpleName(names.get(entry.getIno()).getName());
- entry.setOffset(filepos);
-
- return entry;
- }
-
- /**
- * Read directory entry.
- */
- private void readDirectoryEntry(DumpArchiveEntry entry)
- throws IOException {
- long size = entry.getEntrySize();
- boolean first = true;
-
- while (first ||
- DumpArchiveConstants.SEGMENT_TYPE.ADDR == entry.getHeaderType()) {
- // read the header that we just peeked at.
- if (!first) {
- raw.readRecord();
- }
-
- if (!names.containsKey(entry.getIno()) &&
- DumpArchiveConstants.SEGMENT_TYPE.INODE == entry.getHeaderType()) {
- pending.put(entry.getIno(), entry);
- }
-
- final int datalen = DumpArchiveConstants.TP_SIZE * entry.getHeaderCount();
-
- if (blockBuffer.length < datalen) {
- blockBuffer = new byte[datalen];
- }
-
- if (raw.read(blockBuffer, 0, datalen) != datalen) {
- throw new EOFException();
- }
-
- int reclen = 0;
-
- for (int i = 0; i < datalen - 8 && i < size - 8;
- i += reclen) {
- final int ino = DumpArchiveUtil.convert32(blockBuffer, i);
- reclen = DumpArchiveUtil.convert16(blockBuffer, i + 4);
-
- final byte type = blockBuffer[i + 6];
-
- final String name = DumpArchiveUtil.decode(zipEncoding, blockBuffer, i + 8, blockBuffer[i + 7]);
-
- if (".".equals(name) || "..".equals(name)) {
- // do nothing...
- continue;
- }
-
- final Dirent d = new Dirent(ino, entry.getIno(), type, name);
-
- /*
- if ((type == 4) && names.containsKey(ino)) {
- System.out.println("we already have ino: " +
- names.get(ino));
- }
- */
-
- names.put(ino, d);
-
- // check whether this allows us to fill anything in the pending list.
- for (final Map.Entry<Integer, DumpArchiveEntry> e : pending.entrySet()) {
- final String path = getPath(e.getValue());
-
- if (path != null) {
- e.getValue().setName(path);
- e.getValue()
- .setSimpleName(names.get(e.getKey()).getName());
- queue.add(e.getValue());
- }
- }
-
- // remove anything that we found. (We can't do it earlier
- // because of concurrent modification exceptions.)
- for (final DumpArchiveEntry e : queue) {
- pending.remove(e.getIno());
- }
- }
-
- final byte[] peekBytes = raw.peek();
-
- if (!DumpArchiveUtil.verify(peekBytes)) {
- throw new InvalidFormatException();
- }
-
- entry = DumpArchiveEntry.parse(peekBytes);
- first = false;
- size -= DumpArchiveConstants.TP_SIZE;
- }
- }
-
- /**
- * Get full path for specified archive entry, or null if there's a gap.
- *
- * @param entry the entry to resolve the path for
- * @return full path for specified archive entry, or null if there's a gap.
- */
- private String getPath(final DumpArchiveEntry entry) {
- // build the stack of elements. It's possible that we're
- // still missing an intermediate value; if so we give up and
- // defer this entry until the missing directory arrives.
- final Stack<String> elements = new Stack<>();
- Dirent dirent = null;
-
- for (int i = entry.getIno();; i = dirent.getParentIno()) {
- if (!names.containsKey(i)) {
- elements.clear();
- break;
- }
-
- dirent = names.get(i);
- elements.push(dirent.getName());
-
- if (dirent.getIno() == dirent.getParentIno()) {
- break;
- }
- }
-
- // if an element is missing defer the work and read next entry.
- if (elements.isEmpty()) {
- pending.put(entry.getIno(), entry);
-
- return null;
- }
-
- // generate full path from stack of elements.
- final StringBuilder sb = new StringBuilder(elements.pop());
-
- while (!elements.isEmpty()) {
- sb.append('/');
- sb.append(elements.pop());
- }
-
- return sb.toString();
- }
-
- /**
- * Reads bytes from the current dump archive entry.
- *
- * This method is aware of the boundaries of the current
- * entry in the archive and will deal with them as if they
- * were this stream's start and EOF.
- *
- * @param buf The buffer into which to place bytes read.
- * @param off The offset at which to place bytes read.
- * @param len The number of bytes to read.
- * @return The number of bytes read, or -1 at EOF.
- * @throws IOException on error
- */
- @Override
- public int read(final byte[] buf, int off, int len) throws IOException {
- int totalRead = 0;
-
- if (hasHitEOF || isClosed || entryOffset >= entrySize) {
- return -1;
- }
-
- if (active == null) {
- throw new IllegalStateException("No current dump entry");
- }
-
- if (len + entryOffset > entrySize) {
- len = (int) (entrySize - entryOffset);
- }
-
- while (len > 0) {
- final int sz = len > readBuf.length - recordOffset
- ? readBuf.length - recordOffset : len;
-
- // copy any data we have
- if (recordOffset + sz <= readBuf.length) {
- System.arraycopy(readBuf, recordOffset, buf, off, sz);
- totalRead += sz;
- recordOffset += sz;
- len -= sz;
- off += sz;
- }
-
- // load next block if necessary.
- if (len > 0) {
- if (readIdx >= 512) {
- final byte[] headerBytes = raw.readRecord();
-
- if (!DumpArchiveUtil.verify(headerBytes)) {
- throw new InvalidFormatException();
- }
-
- active = DumpArchiveEntry.parse(headerBytes);
- readIdx = 0;
- }
-
- if (!active.isSparseRecord(readIdx++)) {
- final int r = raw.read(readBuf, 0, readBuf.length);
- if (r != readBuf.length) {
- throw new EOFException();
- }
- } else {
- Arrays.fill(readBuf, (byte) 0);
- }
-
- recordOffset = 0;
- }
- }
-
- entryOffset += totalRead;
-
- return totalRead;
- }
-
- /**
- * Closes the stream for this entry.
- */
- @Override
- public void close() throws IOException {
- if (!isClosed) {
- isClosed = true;
- raw.close();
- }
- }
-
- /**
- * Look at the first few bytes of the file to decide if it's a dump
- * archive. With 32 bytes we can look at the magic value, with a full
- * 1k we can verify the checksum.
- * @param buffer data to match
- * @param length length of data
- * @return whether the buffer seems to contain dump data
- */
- public static boolean matches(final byte[] buffer, final int length) {
- // do we have enough of the header?
- if (length < 32) {
- return false;
- }
-
- // this is the best test
- if (length >= DumpArchiveConstants.TP_SIZE) {
- return DumpArchiveUtil.verify(buffer);
- }
-
- // this will work in a pinch.
- return DumpArchiveConstants.NFS_MAGIC == DumpArchiveUtil.convert32(buffer,
- 24);
- }
-
-}
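
Since the dump reader is now consumed as a library jar instead of bundled sources, the consumer-facing API deleted above stays the same. A minimal sketch of listing a dump archive; `backup.dump` is a hypothetical path:

```java
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.dump.DumpArchiveEntry;
import org.apache.commons.compress.archivers.dump.DumpArchiveInputStream;

public class DumpListExample {
    public static void main(final String[] args) throws IOException, ArchiveException {
        // "backup.dump" is a placeholder for any archive written by dump(8).
        try (InputStream in = new BufferedInputStream(
                Files.newInputStream(Paths.get("backup.dump")));
             DumpArchiveInputStream dump = new DumpArchiveInputStream(in)) {
            DumpArchiveEntry entry;
            // getNextEntry() drains the internal priority queue first, so a
            // parent directory is always returned before the entries inside it.
            while ((entry = dump.getNextEntry()) != null) {
                System.out.printf("%s (%d bytes)%n", entry.getName(), entry.getEntrySize());
            }
        }
    }
}
```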
diff --git a/src/org/apache/commons/compress/archivers/dump/DumpArchiveSummary.java b/src/org/apache/commons/compress/archivers/dump/DumpArchiveSummary.java
deleted file mode 100644
index f17e70f13ec..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/DumpArchiveSummary.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-import java.io.IOException;
-import java.util.Date;
-
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-
-/**
- * This class represents identifying information about a Dump archive volume.
- * It consists of the archive's dump date, label, hostname, device name and possibly
- * last mount point plus the volume's volume id and first record number.
- *
- * For the corresponding C structure see the header of {@link DumpArchiveEntry}.
- */
-public class DumpArchiveSummary {
- private long dumpDate;
- private long previousDumpDate;
- private int volume;
- private String label;
- private int level;
- private String filesys;
- private String devname;
- private String hostname;
- private int flags;
- private int firstrec;
- private int ntrec;
-
- DumpArchiveSummary(final byte[] buffer, final ZipEncoding encoding) throws IOException {
- dumpDate = 1000L * DumpArchiveUtil.convert32(buffer, 4);
- previousDumpDate = 1000L * DumpArchiveUtil.convert32(buffer, 8);
- volume = DumpArchiveUtil.convert32(buffer, 12);
- label = DumpArchiveUtil.decode(encoding, buffer, 676, DumpArchiveConstants.LBLSIZE).trim();
- level = DumpArchiveUtil.convert32(buffer, 692);
- filesys = DumpArchiveUtil.decode(encoding, buffer, 696, DumpArchiveConstants.NAMELEN).trim();
- devname = DumpArchiveUtil.decode(encoding, buffer, 760, DumpArchiveConstants.NAMELEN).trim();
- hostname = DumpArchiveUtil.decode(encoding, buffer, 824, DumpArchiveConstants.NAMELEN).trim();
- flags = DumpArchiveUtil.convert32(buffer, 888);
- firstrec = DumpArchiveUtil.convert32(buffer, 892);
- ntrec = DumpArchiveUtil.convert32(buffer, 896);
-
- //extAttributes = DumpArchiveUtil.convert32(buffer, 900);
- }
-
- /**
- * Get the date of this dump.
- * @return the date of this dump.
- */
- public Date getDumpDate() {
- return new Date(dumpDate);
- }
-
- /**
- * Set dump date.
- * @param dumpDate the dump date
- */
- public void setDumpDate(final Date dumpDate) {
- this.dumpDate = dumpDate.getTime();
- }
-
- /**
- * Get the date of the previous dump at this level or higher.
- * @return the date of the previous dump
- */
- public Date getPreviousDumpDate() {
- return new Date(previousDumpDate);
- }
-
- /**
- * Set previous dump date.
- * @param previousDumpDate the previous dump date
- */
- public void setPreviousDumpDate(final Date previousDumpDate) {
- this.previousDumpDate = previousDumpDate.getTime();
- }
-
- /**
- * Get volume (tape) number.
- * @return volume (tape) number.
- */
- public int getVolume() {
- return volume;
- }
-
- /**
- * Set volume (tape) number.
- * @param volume the volume number
- */
- public void setVolume(final int volume) {
- this.volume = volume;
- }
-
- /**
- * Get the level of this dump. This is a number between 0 and 9, inclusive,
- * and a level 0 dump is a complete dump of the partition. For any other level
- * 'n' the dump contains all files that have changed since the last dump
- * at this level or lower. This is used to support different levels of
- * incremental backups.
- * @return dump level
- */
- public int getLevel() {
- return level;
- }
-
- /**
- * Set level.
- * @param level the level
- */
- public void setLevel(final int level) {
- this.level = level;
- }
-
- /**
- * Get dump label. This may be autogenerated or it may be specified
- * by the user.
- * @return dump label
- */
- public String getLabel() {
- return label;
- }
-
- /**
- * Set dump label.
- * @param label the label
- */
- public void setLabel(final String label) {
- this.label = label;
- }
-
- /**
- * Get the last mountpoint, e.g., /home.
- * @return last mountpoint
- */
- public String getFilesystem() {
- return filesys;
- }
-
- /**
- * Set the last mountpoint.
- * @param filesystem the last mountpoint
- */
- public void setFilesystem(final String filesystem) {
- this.filesys = filesystem;
- }
-
- /**
- * Get the device name, e.g., /dev/sda3 or /dev/mapper/vg0-home.
- * @return device name
- */
- public String getDevname() {
- return devname;
- }
-
- /**
- * Set the device name.
- * @param devname the device name
- */
- public void setDevname(final String devname) {
- this.devname = devname;
- }
-
- /**
- * Get the hostname of the system where the dump was performed.
- * @return the host name
- */
- public String getHostname() {
- return hostname;
- }
-
- /**
- * Set the hostname.
- * @param hostname the host name
- */
- public void setHostname(final String hostname) {
- this.hostname = hostname;
- }
-
- /**
- * Get the miscellaneous flags. Individual bits are exposed via the isXXX accessors below.
- * @return flags
- */
- public int getFlags() {
- return flags;
- }
-
- /**
- * Set the miscellaneous flags.
- * @param flags flags
- */
- public void setFlags(final int flags) {
- this.flags = flags;
- }
-
- /**
- * Get the inode of the first record on this volume.
- * @return inode of the first record on this volume.
- */
- public int getFirstRecord() {
- return firstrec;
- }
-
- /**
- * Set the inode of the first record.
- * @param firstrec the first record
- */
- public void setFirstRecord(final int firstrec) {
- this.firstrec = firstrec;
- }
-
- /**
- * Get the number of records per tape block. This is typically
- * between 10 and 32.
- * @return the number of records per tape block
- */
- public int getNTRec() {
- return ntrec;
- }
-
- /**
- * Set the number of records per tape block.
- * @param ntrec the number of records per tape block
- */
- public void setNTRec(final int ntrec) {
- this.ntrec = ntrec;
- }
-
- /**
- * Is this the new header format? (We do not currently support the
- * old format.)
- *
- * @return true if using new header format
- */
- public boolean isNewHeader() {
- return (flags & 0x0001) == 0x0001;
- }
-
- /**
- * Is this the new inode format? (We do not currently support the
- * old format.)
- * @return true if using new inode format
- */
- public boolean isNewInode() {
- return (flags & 0x0002) == 0x0002;
- }
-
- /**
- * Is this volume compressed? N.B., individual blocks may or may not be compressed.
- * The first block is never compressed.
- * @return true if volume is compressed
- */
- public boolean isCompressed() {
- return (flags & 0x0080) == 0x0080;
- }
-
- /**
- * Does this volume only contain metadata?
- * @return true if volume only contains meta-data
- */
- public boolean isMetaDataOnly() {
- return (flags & 0x0100) == 0x0100;
- }
-
- /**
- * Does this volume contain extended attributes?
- * @return true if volume contains extended attributes.
- */
- public boolean isExtendedAttributes() {
- return (flags & 0x8000) == 0x8000;
- }
-
- @Override
- public int hashCode() {
- int hash = 17;
-
- if (label != null) {
- hash = 31 * hash + label.hashCode();
- }
-
- hash = 31 * hash + (int) (dumpDate ^ (dumpDate >>> 32));
-
- if (hostname != null) {
- hash = 31 * hash + hostname.hashCode();
- }
-
- if (devname != null) {
- hash = 31 * hash + devname.hashCode();
- }
-
- return hash;
- }
-
- @Override
- public boolean equals(final Object o) {
- if (this == o) {
- return true;
- }
-
- if (o == null || !o.getClass().equals(getClass())) {
- return false;
- }
-
- final DumpArchiveSummary rhs = (DumpArchiveSummary) o;
-
- if (dumpDate != rhs.dumpDate) {
- return false;
- }
-
- if ((getHostname() == null) ||
- !getHostname().equals(rhs.getHostname())) {
- return false;
- }
-
- if ((getDevname() == null) || !getDevname().equals(rhs.getDevname())) {
- return false;
- }
-
- return true;
- }
-}
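
The flag accessors above decode single bits of the `flags` word (0x0001 new header, 0x0002 new inode, 0x0080 compressed, and so on). A hedged usage sketch of reading the summary off an open stream; the archive path is a placeholder:

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.dump.DumpArchiveInputStream;
import org.apache.commons.compress.archivers.dump.DumpArchiveSummary;

public class DumpSummaryExample {
    public static void main(final String[] args) throws IOException, ArchiveException {
        // "backup.dump" is a placeholder path.
        try (InputStream in = Files.newInputStream(Paths.get("backup.dump"));
             DumpArchiveInputStream dump = new DumpArchiveInputStream(in)) {
            final DumpArchiveSummary s = dump.getSummary();
            System.out.println("label:       " + s.getLabel());
            System.out.println("host:        " + s.getHostname());
            System.out.println("level:       " + s.getLevel());
            System.out.println("compressed:  " + s.isCompressed()); // tests flags & 0x0080
            System.out.println("records/blk: " + s.getNTRec());
        }
    }
}
```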
diff --git a/src/org/apache/commons/compress/archivers/dump/DumpArchiveUtil.java b/src/org/apache/commons/compress/archivers/dump/DumpArchiveUtil.java
deleted file mode 100644
index 20e1eb3f271..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/DumpArchiveUtil.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-import java.io.IOException;
-import java.util.Arrays;
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-import org.apache.commons.compress.utils.ByteUtils;
-
-/**
- * Various utilities for dump archives.
- */
-class DumpArchiveUtil {
- /**
- * Private constructor to prevent instantiation.
- */
- private DumpArchiveUtil() {
- }
-
- /**
- * Calculate checksum for buffer.
- *
- * @param buffer buffer containing tape segment header
- * @return checksum
- */
- public static int calculateChecksum(final byte[] buffer) {
- int calc = 0;
-
- for (int i = 0; i < 256; i++) {
- calc += DumpArchiveUtil.convert32(buffer, 4 * i);
- }
-
- return DumpArchiveConstants.CHECKSUM -
- (calc - DumpArchiveUtil.convert32(buffer, 28));
- }
-
- /**
- * Verify that the buffer contains a tape segment header.
- *
- * @param buffer the buffer containing a potential tape segment header
- * @return true if the buffer contains a valid tape segment header
- */
- public static final boolean verify(final byte[] buffer) {
- // verify magic. for now only accept NFS_MAGIC.
- final int magic = convert32(buffer, 24);
-
- if (magic != DumpArchiveConstants.NFS_MAGIC) {
- return false;
- }
-
- //verify checksum...
- final int checksum = convert32(buffer, 28);
-
- return checksum == calculateChecksum(buffer);
- }
-
- /**
- * Get the ino associated with this buffer.
- *
- * @param buffer the buffer containing a tape segment header
- * @return the ino associated with this buffer
- */
- public static final int getIno(final byte[] buffer) {
- return convert32(buffer, 20);
- }
-
- /**
- * Read 8-byte integer from buffer.
- *
- * @param buffer
- * @param offset
- * @return the 8-byte entry as a long
- */
- public static final long convert64(final byte[] buffer, final int offset) {
- return ByteUtils.fromLittleEndian(buffer, offset, 8);
- }
-
- /**
- * Read 4-byte integer from buffer.
- *
- * @param buffer
- * @param offset
- * @return the 4-byte entry as an int
- */
- public static final int convert32(final byte[] buffer, final int offset) {
- return (int) ByteUtils.fromLittleEndian(buffer, offset, 4);
- }
-
- /**
- * Read 2-byte integer from buffer.
- *
- * @param buffer
- * @param offset
- * @return the 2-byte entry as an int
- */
- public static final int convert16(final byte[] buffer, final int offset) {
- return (int) ByteUtils.fromLittleEndian(buffer, offset, 2);
- }
-
- /**
- * Decodes a byte array to a string.
- */
- static String decode(final ZipEncoding encoding, final byte[] b, final int offset, final int len)
- throws IOException {
- return encoding.decode(Arrays.copyOfRange(b, offset, offset + len));
- }
-}
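
All integer fields in dump headers are little-endian, which is what the convert* helpers above wrap. A JDK-only sketch equivalent to `convert32`, handy as a sanity check:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class LittleEndianExample {

    // Equivalent of DumpArchiveUtil.convert32 using only the JDK:
    // interpret 4 bytes at the given offset as a little-endian int.
    static int convert32(final byte[] buffer, final int offset) {
        return ByteBuffer.wrap(buffer, offset, 4)
                         .order(ByteOrder.LITTLE_ENDIAN)
                         .getInt();
    }

    public static void main(final String[] args) {
        final byte[] b = {0x01, 0x02, 0x03, 0x04};
        System.out.println(convert32(b, 0)); // prints 67305985 (0x04030201)
    }
}
```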
diff --git a/src/org/apache/commons/compress/archivers/dump/InvalidFormatException.java b/src/org/apache/commons/compress/archivers/dump/InvalidFormatException.java
deleted file mode 100644
index 6169dfe90f5..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/InvalidFormatException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-
-/**
- * Invalid Format Exception. There was an error decoding a
- * tape segment header.
- */
-public class InvalidFormatException extends DumpArchiveException {
- private static final long serialVersionUID = 1L;
- protected long offset;
-
- public InvalidFormatException() {
- super("there was an error decoding a tape segment");
- }
-
- public InvalidFormatException(final long offset) {
- super("there was an error decoding a tape segment header at offset " +
- offset + ".");
- this.offset = offset;
- }
-
- public long getOffset() {
- return offset;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/dump/ShortFileException.java b/src/org/apache/commons/compress/archivers/dump/ShortFileException.java
deleted file mode 100644
index e06c97cc08f..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/ShortFileException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-
-/**
- * Short File Exception. There was an unexpected EOF when reading
- * the input stream.
- */
-public class ShortFileException extends DumpArchiveException {
- private static final long serialVersionUID = 1L;
-
- public ShortFileException() {
- super("unexpected EOF");
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/dump/TapeInputStream.java b/src/org/apache/commons/compress/archivers/dump/TapeInputStream.java
deleted file mode 100644
index 5643decef8f..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/TapeInputStream.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-import java.util.Arrays;
-import java.util.zip.DataFormatException;
-import java.util.zip.Inflater;
-
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Filter stream that mimics a physical tape drive capable of compressing
- * the data stream.
- *
- * @NotThreadSafe
- */
-class TapeInputStream extends FilterInputStream {
- private byte[] blockBuffer = new byte[DumpArchiveConstants.TP_SIZE];
- private int currBlkIdx = -1;
- private int blockSize = DumpArchiveConstants.TP_SIZE;
- private static final int RECORD_SIZE = DumpArchiveConstants.TP_SIZE;
- private int readOffset = DumpArchiveConstants.TP_SIZE;
- private boolean isCompressed = false;
- private long bytesRead = 0;
-
- /**
- * Constructor
- */
- public TapeInputStream(final InputStream in) {
- super(in);
- }
-
- /**
- * Set the DumpArchive Buffer's block size. We need to sync the block size with the
- * dump archive's actual block size since compression is handled at the
- * block level.
- *
- * @param recsPerBlock
- * records per block
- * @param isCompressed
- * true if the archive is compressed
- * @throws IOException
- * if more than one block has already been read, or there was
- * an error reading additional blocks.
- */
- public void resetBlockSize(final int recsPerBlock, final boolean isCompressed)
- throws IOException {
- this.isCompressed = isCompressed;
-
- blockSize = RECORD_SIZE * recsPerBlock;
-
- // save first block in case we need it again
- final byte[] oldBuffer = blockBuffer;
-
- // read rest of new block
- blockBuffer = new byte[blockSize];
- System.arraycopy(oldBuffer, 0, blockBuffer, 0, RECORD_SIZE);
- readFully(blockBuffer, RECORD_SIZE, blockSize - RECORD_SIZE);
-
- this.currBlkIdx = 0;
- this.readOffset = RECORD_SIZE;
- }
-
- /**
- * @see java.io.InputStream#available
- */
- @Override
- public int available() throws IOException {
- if (readOffset < blockSize) {
- return blockSize - readOffset;
- }
-
- return in.available();
- }
-
- /**
- * @see java.io.InputStream#read()
- */
- @Override
- public int read() throws IOException {
- throw new IllegalArgumentException(
- "all reads must be multiple of record size (" + RECORD_SIZE +
- " bytes.");
- }
-
- /**
- * {@inheritDoc}
- *
- * <p>reads the full given length unless EOF is reached.</p>
- *
- * @param len length to read, must be a multiple of the stream's
- * record size
- */
- @Override
- public int read(final byte[] b, int off, final int len) throws IOException {
- if ((len % RECORD_SIZE) != 0) {
- throw new IllegalArgumentException(
- "all reads must be multiple of record size (" + RECORD_SIZE +
- " bytes.");
- }
-
- int bytes = 0;
-
- while (bytes < len) {
- // we need to read from the underlying stream.
- // this will reset readOffset value.
- // return -1 if there's a problem.
- if (readOffset == blockSize) {
- try {
- readBlock(true);
- } catch (ShortFileException sfe) { // NOSONAR
- return -1;
- }
- }
-
- int n = 0;
-
- if ((readOffset + (len - bytes)) <= blockSize) {
- // we can read entirely from the buffer.
- n = len - bytes;
- } else {
- // copy what we can from the buffer.
- n = blockSize - readOffset;
- }
-
- // copy data, increment counters.
- System.arraycopy(blockBuffer, readOffset, b, off, n);
- readOffset += n;
- bytes += n;
- off += n;
- }
-
- return bytes;
- }
-
- /**
- * Skip bytes. Same as read but without the arraycopy.
- *
- * <p>skips the full given length unless EOF is reached.</p>
- *
- * @param len length to read, must be a multiple of the stream's
- * record size
- */
- @Override
- public long skip(final long len) throws IOException {
- if ((len % RECORD_SIZE) != 0) {
- throw new IllegalArgumentException(
- "all reads must be multiple of record size (" + RECORD_SIZE +
- " bytes.");
- }
-
- long bytes = 0;
-
- while (bytes < len) {
- // we need to read from the underlying stream.
- // this will reset readOffset value. We do not perform
- // any decompression if we won't eventually read the data.
- // return -1 if there's a problem.
- if (readOffset == blockSize) {
- try {
- readBlock((len - bytes) < blockSize);
- } catch (ShortFileException sfe) { // NOSONAR
- return -1;
- }
- }
-
- long n = 0;
-
- if ((readOffset + (len - bytes)) <= blockSize) {
- // we can read entirely from the buffer.
- n = len - bytes;
- } else {
- // copy what we can from the buffer.
- n = (long) blockSize - readOffset;
- }
-
- // do not copy data but still increment counters.
- readOffset += n;
- bytes += n;
- }
-
- return bytes;
- }
-
- /**
- * Close the input stream.
- *
- * @throws IOException on error
- */
- @Override
- public void close() throws IOException {
- if (in != null && in != System.in) {
- in.close();
- }
- }
-
- /**
- * Peek at the next record from the input stream and return the data.
- *
- * @return The record data.
- * @throws IOException on error
- */
- public byte[] peek() throws IOException {
- // we need to read from the underlying stream. This
- // isn't a problem since it would be the first step in
- // any subsequent read() anyway.
- if (readOffset == blockSize) {
- try {
- readBlock(true);
- } catch (ShortFileException sfe) { // NOSONAR
- return null;
- }
- }
-
- // copy data, increment counters.
- final byte[] b = new byte[RECORD_SIZE];
- System.arraycopy(blockBuffer, readOffset, b, 0, b.length);
-
- return b;
- }
-
- /**
- * Read a record from the input stream and return the data.
- *
- * @return The record data.
- * @throws IOException on error
- */
- public byte[] readRecord() throws IOException {
- final byte[] result = new byte[RECORD_SIZE];
-
- // the read implementation will loop internally as long as
- // input is available
- if (-1 == read(result, 0, result.length)) {
- throw new ShortFileException();
- }
-
- return result;
- }
-
- /**
- * Read next block. All decompression is handled here.
- *
- * @param decompress if false the buffer will not be decompressed.
- * This is an optimization for longer seeks.
- */
- private void readBlock(final boolean decompress) throws IOException {
- if (in == null) {
- throw new IOException("input buffer is closed");
- }
-
- if (!isCompressed || (currBlkIdx == -1)) {
- // file is not compressed
- readFully(blockBuffer, 0, blockSize);
- bytesRead += blockSize;
- } else {
- readFully(blockBuffer, 0, 4);
- bytesRead += 4;
-
- final int h = DumpArchiveUtil.convert32(blockBuffer, 0);
- final boolean compressed = (h & 0x01) == 0x01;
-
- if (!compressed) {
- // file is compressed but this block is not.
- readFully(blockBuffer, 0, blockSize);
- bytesRead += blockSize;
- } else {
- // this block is compressed.
- final int flags = (h >> 1) & 0x07;
- int length = (h >> 4) & 0x0FFFFFFF;
- final byte[] compBuffer = new byte[length];
- readFully(compBuffer, 0, length);
- bytesRead += length;
-
- if (!decompress) {
- // just in case someone reads the data.
- Arrays.fill(blockBuffer, (byte) 0);
- } else {
- switch (DumpArchiveConstants.COMPRESSION_TYPE.find(flags &
- 0x03)) {
- case ZLIB:
-
- final Inflater inflator = new Inflater();
- try {
- inflator.setInput(compBuffer, 0, compBuffer.length);
- length = inflator.inflate(blockBuffer);
-
- if (length != blockSize) {
- throw new ShortFileException();
- }
- } catch (final DataFormatException e) {
- throw new DumpArchiveException("bad data", e);
- } finally {
- inflator.end();
- }
-
- break;
-
- case BZLIB:
- throw new UnsupportedCompressionAlgorithmException(
- "BZLIB2");
-
- case LZO:
- throw new UnsupportedCompressionAlgorithmException(
- "LZO");
-
- default:
- throw new UnsupportedCompressionAlgorithmException();
- }
- }
- }
- }
-
- currBlkIdx++;
- readOffset = 0;
- }
-
- /**
- * Read buffer
- */
- private void readFully(final byte[] b, final int off, final int len)
- throws IOException {
- final int count = IOUtils.readFully(in, b, off, len);
- if (count < len) {
- throw new ShortFileException();
- }
- }
-
- /**
- * Get number of bytes read.
- */
- public long getBytesRead() {
- return bytesRead;
- }
-}
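
The ZLIB branch of `readBlock()` above is the only compression the reader actually handles. A self-contained sketch of the same inflate step, mirroring (not reusing) the deleted code:

```java
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

public class InflateBlockExample {

    // Inflate one zlib-compressed tape block into a buffer of the expected
    // block size, as the ZLIB case of readBlock() does.
    static byte[] inflateBlock(final byte[] compressed, final int blockSize)
            throws DataFormatException {
        final Inflater inflater = new Inflater();
        try {
            inflater.setInput(compressed, 0, compressed.length);
            final byte[] block = new byte[blockSize];
            if (inflater.inflate(block) != blockSize) {
                // readBlock() raises ShortFileException in this situation.
                throw new IllegalStateException("short block");
            }
            return block;
        } finally {
            inflater.end();
        }
    }
}
```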
diff --git a/src/org/apache/commons/compress/archivers/dump/UnrecognizedFormatException.java b/src/org/apache/commons/compress/archivers/dump/UnrecognizedFormatException.java
deleted file mode 100644
index 333aeacd62b..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/UnrecognizedFormatException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-
-/**
- * Unrecognized Format Exception. This is either not a recognized dump archive or there's
- * a bad tape segment header.
- */
-public class UnrecognizedFormatException extends DumpArchiveException {
- private static final long serialVersionUID = 1L;
-
- public UnrecognizedFormatException() {
- super("this is not a recognized format.");
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/dump/UnsupportedCompressionAlgorithmException.java b/src/org/apache/commons/compress/archivers/dump/UnsupportedCompressionAlgorithmException.java
deleted file mode 100644
index 8c604030423..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/UnsupportedCompressionAlgorithmException.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.dump;
-
-
-/**
- * Unsupported compression algorithm. The dump archive uses an unsupported
- * compression algorithm (BZLIB2 or LZO).
- */
-public class UnsupportedCompressionAlgorithmException
- extends DumpArchiveException {
- private static final long serialVersionUID = 1L;
-
- public UnsupportedCompressionAlgorithmException() {
- super("this file uses an unsupported compression algorithm.");
- }
-
- public UnsupportedCompressionAlgorithmException(final String alg) {
- super("this file uses an unsupported compression algorithm: " + alg +
- ".");
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/dump/package.html b/src/org/apache/commons/compress/archivers/dump/package.html
deleted file mode 100644
index 72f3c68c40a..00000000000
--- a/src/org/apache/commons/compress/archivers/dump/package.html
+++ /dev/null
@@ -1,56 +0,0 @@
-
-
-
-
-<html>
-<body>
-<p>This package provides stream classes for reading archives
- using the Unix DUMP format. This format is similar to (and
- contemporary with) TAR but reads the raw filesystem directly.
- This means that writers are filesystem-specific even though the
- created archives are filesystem-agnostic.</p>
-
-<p>Unlike other formats DUMP offers clean support for sparse files,
- extended attributes, and other file metadata. In addition DUMP
- supports incremental dump files and can capture (most) file deletion.
- It also provides a native form of compression and will soon support
- native encryption as well.</p>
-
-<p>In practice TAR archives are used for both distribution
- and backups. DUMP archives are used exclusively for backups.</p>
-
-<p>Like any 30+-year-old application there are a number of variants.
- For pragmatic reasons we will only support archives with the
- 'new' tape header and inode formats. Other restrictions:</p>
-
-<ul>
-<li>We only support ZLIB compression. The format
- also permits LZO and BZLIB compression.</li>
-<li>Sparse files will have the holes filled.</li>
-<li>MacOS finder and resource streams are ignored.</li>
-<li>Extended attributes are not currently provided.</li>
-<li>SELinux labels are not currently provided.</li>
-</ul>
-
-<p>As of Apache Commons Compress 1.3 support for the dump format is
- read-only.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/archivers/examples/Archive.java b/src/org/apache/commons/compress/archivers/examples/Archive.java
deleted file mode 100644
index 1895a7d13db..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Archive.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.FileFilter;
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Consumes files and passes them to a sink, usually used to create an archive of them.
- * @since 1.17
- */
-public class Archive {
- /**
- * Sets up a chain of operations and consumes the files from a source of files.
- * @since 1.17
- */
- public interface ChainBuilder {
- /**
- * Adds a filter to the chain.
- * @param filter the filter to apply
- * @return an updated builder
- */
- ChainBuilder filter(Filter<File> filter);
- /**
- * Adds a filter to the chain.
- * @param filter the filter to apply
- * @return an updated builder
- */
- ChainBuilder filter(FileFilter filter);
- /**
- * Adds a filter to the chain that filters out entries that cannot be read.
- * @return an updated builder
- */
- ChainBuilder skipUnreadable();
- /**
- * Adds a filter to the chain that filters out everything that is not a file.
- * @return an updated builder
- */
- ChainBuilder skipNonFiles();
- /**
- * Adds a transformer to the chain.
- * @param transformer transformer to apply
- * @return an updated builder
- */
- ChainBuilder map(Transformer<File> transformer);
- /**
- * Adds a generic step to the chain.
- * @param step step to perform
- * @return an updated builder
- */
- ChainBuilder withStep(ChainStep<File> step);
- /**
- * Actually consumes all the files supplied.
- * @param sink sink that the entries will be sent to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be written for other reasons
- */
- void to(Sink<File> sink) throws IOException, ArchiveException;
- }
-
- /**
- * Sets the source of files to be a directory.
- * @param f the source directory
- * @return a builder for the chain to be created and run
- */
- public static ChainBuilder directory(File f) {
- return source(new DirectoryBasedSource(f));
- }
-
- /**
- * Sets the source of files to process.
- * @param source the source directory
- * @return a builder for the chain to be created and run
- */
- public static ChainBuilder source(Source<File> source) {
- return new Builder(source);
- }
-
- private static class Builder implements ChainBuilder {
- private final Source<File> source;
- private ChainDefinition<File> chainDef = new ChainDefinition<>();
-
- Builder(Source<File> source) {
- this.source = source;
- }
-
- @Override
- public ChainBuilder filter(Filter<File> filter) {
- return withStep(filter);
- }
- @Override
- public ChainBuilder filter(FileFilter filter) {
- return filter(new FileFilterAdapter(filter));
- }
- @Override
- public ChainBuilder skipUnreadable() {
- return filter(new FileFilter() {
- @Override
- public boolean accept(File f) {
- return f.canRead();
- }
- });
- }
- @Override
- public ChainBuilder skipNonFiles() {
- return filter(new FileFilter() {
- @Override
- public boolean accept(File f) {
- return f.isFile();
- }
- });
- }
- @Override
- public ChainBuilder map(Transformer<File> transformer) {
- return withStep(transformer);
- }
- @Override
- public ChainBuilder withStep(ChainStep<File> step) {
- chainDef.add(step);
- return this;
- }
- @Override
- public void to(Sink<File> sink) throws IOException, ArchiveException {
- chainDef.add(sink);
- chainDef.freeze();
- new ChainRunner<File>(source, chainDef, sink).run();
- }
- }
-
-}
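
The builder above is meant to be used fluently. A minimal sketch, assuming the generic signatures restored above; the paths and the "tar" format string are placeholders:

```java
import java.io.File;
import java.io.IOException;

import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.examples.Archive;
import org.apache.commons.compress.archivers.examples.ArchiveSinks;
import org.apache.commons.compress.archivers.examples.Sink;

public class ChainExample {
    public static void main(final String[] args) throws IOException, ArchiveException {
        // "src" and "out.tar" are placeholders; any format accepted by
        // ArchiveStreamFactory works here.
        try (Sink<File> sink = ArchiveSinks.forFile("tar", new File("out.tar"))) {
            Archive.directory(new File("src"))
                   .skipNonFiles()     // keep only regular files
                   .skipUnreadable()   // drop files we cannot open
                   .to(sink);          // freezes the chain and runs it
        }
    }
}
```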
diff --git a/src/org/apache/commons/compress/archivers/examples/ArchiveCli.java b/src/org/apache/commons/compress/archivers/examples/ArchiveCli.java
deleted file mode 100644
index cb204602a8c..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ArchiveCli.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Simple command line tool that creates an archive from the contents of a directory.
- *
- * <p>Usage: ArchiveCli dir format archive</p>
- * @since 1.17
- */
-public class ArchiveCli {
-
- public static void main(String[] args) throws IOException, ArchiveException {
- if (args.length != 3) {
- System.err.println("Usage: ArchiveCli dir format target");
- System.exit(1);
- }
- try (Sink<File> sink = ArchiveSinks.forFile(args[1], new File(args[2]))) {
- Archive.directory(new File(args[0]))
- .to(sink);
- }
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/ArchiveEntrySource.java b/src/org/apache/commons/compress/archivers/examples/ArchiveEntrySource.java
deleted file mode 100644
index 402134fd3b1..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ArchiveEntrySource.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * Combines Source and a factory for a filter that skips unreadable entries.
- * @since 1.17
- */
-public interface ArchiveEntrySource extends Source<ArchiveEntry> {
-
- /**
- * Provides a filter that can be used to skip entries the
- * underlying source is unable to read the content of.
- * @return filter that can be used to skip entries the underlying
- * source is unable to read the content of
- */
- Filter<ArchiveEntry> skipUnreadable();
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/ArchiveSinks.java b/src/org/apache/commons/compress/archivers/examples/ArchiveSinks.java
deleted file mode 100644
index f00fc4f3886..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ArchiveSinks.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.FileOutputStream;
-import java.nio.channels.Channels;
-import java.nio.channels.FileChannel;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.StandardOpenOption;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveStreamFactory;
-import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
-
-/**
- * Supplies factory methods for file sinks that write to archives.
- * @since 1.17
- */
-public class ArchiveSinks {
- /**
- * Uses {@link ArchiveStreamFactory#createArchiveOutputStream}.
- *
- * <p>Will not support 7z.</p>
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param os the stream to write to.
- * @return a sink that consumes the files
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public static Sink<File> forStream(String format, OutputStream os) throws IOException, ArchiveException {
- return new FileToArchiveSink(new ArchiveStreamFactory().createArchiveOutputStream(format, os));
- }
-
- /**
- * Uses {@link ArchiveStreamFactory#createArchiveOutputStream} unless
- * special handling for ZIP or 7z is required.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the file to write to.
- * @return a sink that consumes the files
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public static Sink<File> forFile(String format, File target) throws IOException, ArchiveException {
- if (prefersSeekableByteChannel(format)) {
- return forChannel(format, FileChannel.open(target.toPath(), StandardOpenOption.WRITE,
- StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING));
- }
- return new FileToArchiveSink(new ArchiveStreamFactory()
- .createArchiveOutputStream(format, new FileOutputStream(target)));
- }
-
- /**
- * Uses {@link ArchiveStreamFactory#createArchiveOutputStream} unless
- * special handling for ZIP or 7z is required.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param c the channel to write to.
- * @return a sink that consumes the files
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public static Sink<File> forChannel(String format, SeekableByteChannel c) throws IOException, ArchiveException {
- if (!prefersSeekableByteChannel(format)) {
- return forStream(format, Channels.newOutputStream(c));
- } else if (ArchiveStreamFactory.ZIP.equalsIgnoreCase(format)) {
- return new FileToArchiveSink(new ZipArchiveOutputStream(c));
- } else if (ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format)) {
- return new SevenZOutputFileSink(c);
- } else {
- throw new ArchiveException("don't know how to handle format " + format);
- }
- }
-
- private static boolean prefersSeekableByteChannel(String format) {
- return ArchiveStreamFactory.ZIP.equalsIgnoreCase(format) || ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format);
- }
-}
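
Because `forStream()` accepts any OutputStream, a streaming format such as tar can be composed with the JDK's GZIPOutputStream to produce a compressed archive on the fly. A sketch under that assumption; both paths are placeholders:

```java
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.zip.GZIPOutputStream;

import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.examples.Archive;
import org.apache.commons.compress.archivers.examples.ArchiveSinks;
import org.apache.commons.compress.archivers.examples.Sink;

public class TarGzExample {
    public static void main(final String[] args) throws IOException, ArchiveException {
        // "out.tar.gz" and "docs" are placeholders. tar streams fine, so it
        // never takes the seekable-channel path that ZIP and 7z prefer.
        try (OutputStream os = new GZIPOutputStream(
                Files.newOutputStream(Paths.get("out.tar.gz")));
             Sink<File> sink = ArchiveSinks.forStream("tar", os)) {
            Archive.directory(new File("docs")).skipNonFiles().to(sink);
        }
    }
}
```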
diff --git a/src/org/apache/commons/compress/archivers/examples/ArchiveSources.java b/src/org/apache/commons/compress/archivers/examples/ArchiveSources.java
deleted file mode 100644
index 670eb2f6a49..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ArchiveSources.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.FileInputStream;
-import java.nio.channels.Channels;
-import java.nio.channels.FileChannel;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.StandardOpenOption;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.archivers.ArchiveStreamFactory;
-import org.apache.commons.compress.archivers.sevenz.SevenZFile;
-import org.apache.commons.compress.archivers.zip.ZipFile;
-
-/**
- * Supplies factory methods for ArchiveEntry sources that read from archives.
- * @since 1.17
- */
-public class ArchiveSources {
- /**
- * Builder for {@link ArchiveEntrySource} that needs to know its format.
- * @since 1.17
- */
- public interface PendingFormat {
- /**
- * Signals the format shall be detected automatically.
- * @return the configured source
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- ArchiveEntrySource detectFormat() throws IOException, ArchiveException;
- /**
- * Explicitly provides the expected format of the archive.
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @return the configured source
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- ArchiveEntrySource withFormat(String format) throws IOException, ArchiveException;
- }
-
- /**
- * Uses {@link ArchiveStreamFactory#createArchiveInputStream} unless special handling for ZIP or 7z is required.
- *
- * @param f the file to read from
- * @return a builder that needs to know the format
- */
- public static PendingFormat forFile(final File f) {
- return new PendingFormat() {
- @Override
- public ArchiveEntrySource detectFormat() throws IOException, ArchiveException {
- String format = null;
- try (InputStream i = new BufferedInputStream(new FileInputStream(f))) {
- format = new ArchiveStreamFactory().detect(i);
- }
- return withFormat(format);
- }
- @Override
- public ArchiveEntrySource withFormat(String format) throws IOException, ArchiveException {
- if (prefersSeekableByteChannel(format)) {
- return forChannel(format, FileChannel.open(f.toPath(), StandardOpenOption.READ));
- }
- return new StreamBasedArchiveEntrySource(new ArchiveStreamFactory()
- .createArchiveInputStream(format, new BufferedInputStream(new FileInputStream(f))));
- }
- };
- }
-
- /**
- * Uses {@link ArchiveStreamFactory#createArchiveInputStream} unless special handling for ZIP or 7z is required.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param c the channel to read from
- * @return the configured source
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public static ArchiveEntrySource forChannel(String format, SeekableByteChannel c)
- throws IOException, ArchiveException {
- if (!prefersSeekableByteChannel(format)) {
- return new StreamBasedArchiveEntrySource(new ArchiveStreamFactory()
- .createArchiveInputStream(format, Channels.newInputStream(c)));
- } else if (ArchiveStreamFactory.ZIP.equalsIgnoreCase(format)) {
- return new ZipArchiveEntrySource(c);
- } else if (ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format)) {
- return new SevenZArchiveEntrySource(c);
- }
- throw new ArchiveException("don't know how to handle format " + format);
- }
-
- /**
- * Uses {@link ArchiveStreamFactory#createArchiveInputStream}.
- *
- * <p>Will not support 7z.</p>
- *
- * @param in the stream to read from
- * @return a builder that needs to know the format
- */
- public static PendingFormat forStream(final InputStream in) {
- return new PendingFormat() {
- @Override
- public ArchiveEntrySource detectFormat() throws IOException, ArchiveException {
- return new StreamBasedArchiveEntrySource(new ArchiveStreamFactory().createArchiveInputStream(in));
- }
- @Override
- public ArchiveEntrySource withFormat(String format) throws IOException, ArchiveException {
- return new StreamBasedArchiveEntrySource(new ArchiveStreamFactory()
- .createArchiveInputStream(format, in));
- }
- };
- }
-
- private static boolean prefersSeekableByteChannel(String format) {
- return ArchiveStreamFactory.ZIP.equalsIgnoreCase(format) || ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format);
- }
-}
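
`detectFormat()` above defers to ArchiveStreamFactory's magic-byte sniffing. A standalone sketch of that same detection step, using the identical call the deleted code makes; the path is a placeholder:

```java
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.ArchiveStreamFactory;

public class DetectFormatExample {
    public static void main(final String[] args) throws IOException, ArchiveException {
        // "archive.bin" is a placeholder. detect() reads the leading magic
        // bytes and needs mark/reset, hence the BufferedInputStream wrapper.
        try (InputStream in = new BufferedInputStream(
                Files.newInputStream(Paths.get("archive.bin")))) {
            System.out.println(new ArchiveStreamFactory().detect(in)); // e.g. "zip"
        }
    }
}
```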
diff --git a/src/org/apache/commons/compress/archivers/examples/Archiver.java b/src/org/apache/commons/compress/archivers/examples/Archiver.java
deleted file mode 100644
index b34d16bb786..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Archiver.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.Channels;
-import java.nio.channels.FileChannel;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.archivers.ArchiveStreamFactory;
-import org.apache.commons.compress.archivers.sevenz.SevenZOutputFile;
-import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Provides a high level API for creating archives.
- * @since 1.17
- */
-public class Archiver {
-
- private interface ArchiveEntryCreator {
- ArchiveEntry create(File f, String entryName) throws IOException;
- }
-
- private interface ArchiveEntryConsumer {
- void accept(File source, ArchiveEntry entry) throws IOException;
- }
-
- private interface Finisher {
- void finish() throws IOException;
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the file to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, File target, File directory) throws IOException, ArchiveException {
- if (prefersSeekableByteChannel(format)) {
- try (SeekableByteChannel c = FileChannel.open(target.toPath(), StandardOpenOption.WRITE,
- StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
- create(format, c, directory);
- }
- return;
- }
- try (OutputStream o = Files.newOutputStream(target.toPath())) {
- create(format, o, directory);
- }
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the stream to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, OutputStream target, File directory) throws IOException, ArchiveException {
- create(new ArchiveStreamFactory().createArchiveOutputStream(format, target), directory);
- }
-
- /**
- * Creates an archive {@code target} using the format {@code
- * format} by recursively including all files and directories in
- * {@code directory}.
- *
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @param target the channel to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(String format, SeekableByteChannel target, File directory)
- throws IOException, ArchiveException {
- if (!prefersSeekableByteChannel(format)) {
- create(format, Channels.newOutputStream(target), directory);
- } else if (ArchiveStreamFactory.ZIP.equalsIgnoreCase(format)) {
- create(new ZipArchiveOutputStream(target), directory);
- } else if (ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format)) {
- create(new SevenZOutputFile(target), directory);
- } else {
- // never reached as prefersSeekableByteChannel only returns true for ZIP and 7z
- throw new ArchiveException("don't know how to handle format " + format);
- }
- }
-
- /**
- * Creates an archive {@code target} by recursively including all
- * files and directories in {@code directory}.
- *
- * @param target the stream to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be created for other reasons
- */
- public void create(final ArchiveOutputStream target, File directory)
- throws IOException, ArchiveException {
- create(directory, new ArchiveEntryCreator() {
- public ArchiveEntry create(File f, String entryName) throws IOException {
- return target.createArchiveEntry(f, entryName);
- }
- }, new ArchiveEntryConsumer() {
- public void accept(File source, ArchiveEntry e) throws IOException {
- target.putArchiveEntry(e);
- if (!e.isDirectory()) {
- try (InputStream in = new BufferedInputStream(Files.newInputStream(source.toPath()))) {
- IOUtils.copy(in, target);
- }
- }
- target.closeArchiveEntry();
- }
- }, new Finisher() {
- public void finish() throws IOException {
- target.finish();
- }
- });
- }
-
- /**
- * Creates an archive {@code target} by recursively including all
- * files and directories in {@code directory}.
- *
- * @param target the file to write the new archive to.
- * @param directory the directory that contains the files to archive.
- * @throws IOException if an I/O error occurs
- */
- public void create(final SevenZOutputFile target, File directory) throws IOException {
- create(directory, new ArchiveEntryCreator() {
- public ArchiveEntry create(File f, String entryName) throws IOException {
- return target.createArchiveEntry(f, entryName);
- }
- }, new ArchiveEntryConsumer() {
- public void accept(File source, ArchiveEntry e) throws IOException {
- target.putArchiveEntry(e);
- if (!e.isDirectory()) {
- final byte[] buffer = new byte[8024];
- int n = 0;
- long count = 0;
- try (InputStream in = new BufferedInputStream(Files.newInputStream(source.toPath()))) {
- while (-1 != (n = in.read(buffer))) {
- target.write(buffer, 0, n);
- count += n;
- }
- }
- }
- target.closeArchiveEntry();
- }
- }, new Finisher() {
- public void finish() throws IOException {
- target.finish();
- }
- });
- }
-
- private boolean prefersSeekableByteChannel(String format) {
- return ArchiveStreamFactory.ZIP.equalsIgnoreCase(format) || ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format);
- }
-
- private void create(File directory, ArchiveEntryCreator creator, ArchiveEntryConsumer consumer,
- Finisher finisher) throws IOException {
- create("", directory, creator, consumer);
- finisher.finish();
- }
-
- private void create(String prefix, File directory, ArchiveEntryCreator creator, ArchiveEntryConsumer consumer)
- throws IOException {
- File[] children = directory.listFiles();
- if (children == null) {
- return;
- }
- for (File f : children) {
- String entryName = prefix + f.getName() + (f.isDirectory() ? "/" : "");
- consumer.accept(f, creator.create(f, entryName));
- if (f.isDirectory()) {
- create(entryName, f, creator, consumer);
- }
- }
- }
-}
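
(A minimal sketch of the deleted Archiver entry point; the file names are placeholders.)

import java.io.File;
import org.apache.commons.compress.archivers.ArchiveStreamFactory;
import org.apache.commons.compress.archivers.examples.Archiver;

public class ArchiverUsage {
    public static void main(String[] args) throws Exception {
        // Recursively packs everything under srcDir into out.zip; the format
        // string is any name accepted by ArchiveStreamFactory.
        new Archiver().create(ArchiveStreamFactory.ZIP,
                new File("out.zip"), new File("srcDir"));
    }
}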
diff --git a/src/org/apache/commons/compress/archivers/examples/Chain.java b/src/org/apache/commons/compress/archivers/examples/Chain.java
deleted file mode 100644
index 86d2e1c60d8..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Chain.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import java.util.Iterator;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Encapsulates the execution flow of a chain of operations.
- * @since 1.17
- */
-public class Chain<T> {
-
- private final Iterator<ChainStep<T>> chain;
-
- /**
- * Instantiates a new chain.
- *
- * @param chain the steps to take in order.
- */
- public Chain(Iterator<ChainStep<T>> chain) {
- this.chain = chain;
- }
-
- /**
- * Invokes the next step of the chain.
- *
- * @param payload the payload to pass to the next step
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if an archive format related error occurs
- */
- public void next(ChainPayload<T> payload) throws IOException, ArchiveException {
- if (chain.hasNext()) {
- chain.next().process(payload, this);
- }
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/ChainDefinition.java b/src/org/apache/commons/compress/archivers/examples/ChainDefinition.java
deleted file mode 100644
index d8a387f8485..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ChainDefinition.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.util.Deque;
-import java.util.LinkedList;
-
-/**
- * The recipe for building a {@link Chain}.
- * @since 1.17
- */
-class ChainDefinition<T> {
- private final Deque<ChainStep<T>> steps = new LinkedList<>();
- private volatile boolean frozen = false;
-
- /**
- * Adds a step.
- * @throws IllegalStateException if the definition is already frozen.
- */
- void add(ChainStep<T> step) {
- if (frozen) {
- throw new IllegalStateException("the definition is already frozen");
- }
- steps.addLast(step);
- }
-
- /**
- * Freezes the definition.
- *
- * <p>Once this method has been invoked {@link #add} can no longer be invoked.</p>
- *
- * @throws IllegalStateException if the last step of the definition is not a sink.
- */
- void freeze() {
- if (!frozen) {
- frozen = true;
- if (!(steps.getLast() instanceof Sink)) {
- throw new IllegalStateException("this definition doesn't end in a sink");
- }
- }
- }
-
- /**
- * Returns a chain for this definition.
- *
- * @throws IllegalStateException if the definition is not frozen.
- */
- Chain<T> chain() {
- if (!frozen) {
- throw new IllegalStateException("the definition hasn't been frozen, yet");
- }
- return new Chain<T>(steps.iterator());
- }
-}
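
(The add/freeze/chain contract above, sketched as a helper; ChainDefinition is package-private, so this assumes a caller inside the examples package, as Expand.Builder is.)

import org.apache.commons.compress.archivers.ArchiveEntry;

class ChainDefinitionUsage {
    static Chain<ArchiveEntry> build(Filter<ArchiveEntry> filter, Sink<ArchiveEntry> sink) {
        ChainDefinition<ArchiveEntry> def = new ChainDefinition<>();
        def.add(filter);    // intermediate steps first
        def.add(sink);      // the definition must end in a Sink
        def.freeze();       // from here on, add() throws IllegalStateException
        return def.chain(); // only legal once the definition is frozen
    }
}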
diff --git a/src/org/apache/commons/compress/archivers/examples/ChainPayload.java b/src/org/apache/commons/compress/archivers/examples/ChainPayload.java
deleted file mode 100644
index 11d86181226..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ChainPayload.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.InputStream;
-
-/**
- * The data that is pushed through a chain.
- * @since 1.17
- */
-public class ChainPayload<T> {
- private final T entry;
- private final String entryName;
- private final Supplier<InputStream> input;
- /**
- * Constructs the payload.
- * @param entry the actual payload
- * @param entryName the local name of the entry. This may - for
- * example - be the file name relative to a directory.
- * @param input supplies an input stream to the entry's
- * content. Is not expected to be called more than once.
- */
- public ChainPayload(T entry, String entryName, Supplier<InputStream> input) {
- this.entry = entry;
- this.entryName = entryName;
- this.input = input;
- }
- /**
- * Provides the real payload.
- * @return the real payload
- *
- */
- public T getEntry() {
- return entry;
- }
- /**
- * Provides the local name of the entry.
- *
- * <p>This may - for example - be the file name relative to a
- * directory.</p>
- *
- * @return local name of the entry
- */
- public String getEntryName() {
- return entryName;
- }
- /**
- * Returns a {@link Supplier} that can be used to read the entry's content.
- *
- * <p>The supplier is not required to be callable more than
- * once.</p>
- *
- * @return supplier of input
- */
- public Supplier<InputStream> getInput() {
- return input;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/ChainRunner.java b/src/org/apache/commons/compress/archivers/examples/ChainRunner.java
deleted file mode 100644
index 5f435890bff..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ChainRunner.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Contains the execution logic of a full chain including a source.
- * @since 1.17
- */
-class ChainRunner<T> {
- private final Source<T> source;
- private final ChainDefinition<T> chainDef;
- private final Sink<T> sink;
-
- ChainRunner(Source<T> source, ChainDefinition<T> chainDef, Sink<T> sink) {
- this.source = source;
- this.chainDef = chainDef;
- this.sink = sink;
- }
-
- void run() throws IOException, ArchiveException {
- ThrowingIterator<ChainPayload<T>> iter = source.get();
- while (iter.hasNext()) {
- chainDef.chain().next(iter.next());
- }
- sink.finish();
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/ChainStep.java b/src/org/apache/commons/compress/archivers/examples/ChainStep.java
deleted file mode 100644
index ee0e4b3acf5..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ChainStep.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * A step inside of a {@link Chain}.
- * @since 1.17
- */
-public interface ChainStep<T> {
- /**
- * Process the chain's payload.
- *
- * <p>Any non-terminal step that invokes the {@link Supplier} of
- * the payload is responsible for providing a fresh supplier if
- * the chain is to be continued.</p>
- *
- * @param payload the payload.
- * @param chain chain to return control to once processing is done.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if an archive format related error occurs
- */
- void process(ChainPayload<T> payload, Chain<T> chain) throws IOException, ArchiveException;
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/DirectoryBasedSource.java b/src/org/apache/commons/compress/archivers/examples/DirectoryBasedSource.java
deleted file mode 100644
index c6ca484c582..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/DirectoryBasedSource.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-/**
- * Recursively returns all files and directories contained inside of a base directory.
- * @since 1.17
- */
-public class DirectoryBasedSource implements Source<File> {
-
- private final File dir;
-
- /**
- * Sets up a directory as source.
- *
- * @param dir the directory to provide entries from.
- * @throws IllegalArgumentException if dir doesn't exist or is not a directory
- */
- public DirectoryBasedSource(File dir) {
- if (!dir.isDirectory()) {
- throw new IllegalArgumentException("dir is not a readable directory");
- }
- this.dir = dir;
- }
-
- @Override
- public ThrowingIterator<ChainPayload<File>> get() throws IOException {
- return new DirectoryIterator("", dir);
- }
-
- @Override
- public void close() {
- }
-
- private static class DirectoryIterator implements ThrowingIterator<ChainPayload<File>> {
- private final Iterator<File> files;
- private final String namePrefix;
- private DirectoryIterator nestedIterator;
- DirectoryIterator(String namePrefix, File dir) throws IOException {
- this.namePrefix = namePrefix;
- File[] fs = dir.listFiles();
- files = fs == null ? Collections.<File>emptyIterator() : Arrays.asList(fs).iterator();
- }
-
- @Override
- public boolean hasNext() throws IOException {
- if (nestedIterator != null && nestedIterator.hasNext()) {
- return true;
- }
- if (nestedIterator != null) {
- nestedIterator = null;
- }
- return files.hasNext();
- }
-
- @Override
- public ChainPayload<File> next() throws IOException {
- if (!hasNext()) {
- throw new NoSuchElementException();
- }
- if (nestedIterator != null) {
- return nestedIterator.next();
- }
- final File f = files.next();
- String entryName = namePrefix + f.getName();
- if (f.isDirectory()) {
- entryName += "/";
- nestedIterator = new DirectoryIterator(entryName, f);
- }
- return new ChainPayload<File>(f, entryName, new Supplier<InputStream>() {
- @Override
- public InputStream get() throws IOException {
- return new FileInputStream(f);
- }
- });
- }
-
- }
-}
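
(Walking a directory with the source deleted above, as a sketch; the directory name is a placeholder.)

import java.io.File;
import org.apache.commons.compress.archivers.examples.ChainPayload;
import org.apache.commons.compress.archivers.examples.DirectoryBasedSource;
import org.apache.commons.compress.archivers.examples.Source;
import org.apache.commons.compress.archivers.examples.ThrowingIterator;

public class DirectorySourceUsage {
    public static void main(String[] args) throws Exception {
        try (Source<File> src = new DirectoryBasedSource(new File("srcDir"))) {
            ThrowingIterator<ChainPayload<File>> it = src.get();
            while (it.hasNext()) {
                // entry names are relative; directory entries end in '/'
                System.out.println(it.next().getEntryName());
            }
        }
    }
}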
diff --git a/src/org/apache/commons/compress/archivers/examples/DirectoryBasedSupplier.java b/src/org/apache/commons/compress/archivers/examples/DirectoryBasedSupplier.java
deleted file mode 100644
index 1b8cc9180ea..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/DirectoryBasedSupplier.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-/**
- * Recursively returns all files and directories contained inside of a base directory.
- * @since 1.17
- */
-public class DirectoryBasedSupplier
- implements Supplier<ThrowingIterator<ChainPayload<File>>> {
-
- private final File dir;
-
- /**
- * @param dir the directory to provide entries from.
- */
- public DirectoryBasedSupplier(File dir) {
- if (!dir.isDirectory()) {
- throw new IllegalArgumentException("dir is not a readable directory");
- }
- this.dir = dir;
- }
-
- @Override
- public ThrowingIterator<ChainPayload<File>> get() throws IOException {
- return new DirectoryIterator("", dir);
- }
-
- private static class DirectoryIterator implements ThrowingIterator<ChainPayload<File>> {
- private final Iterator<File> files;
- private final String namePrefix;
- private DirectoryIterator nestedIterator;
- DirectoryIterator(String namePrefix, File dir) throws IOException {
- this.namePrefix = namePrefix;
- File[] fs = dir.listFiles();
- files = fs == null ? Collections.<File>emptyIterator() : Arrays.asList(fs).iterator();
- }
-
- @Override
- public boolean hasNext() throws IOException {
- if (nestedIterator != null && nestedIterator.hasNext()) {
- return true;
- }
- if (nestedIterator != null) {
- nestedIterator = null;
- }
- return files.hasNext();
- }
-
- @Override
- public ChainPayload<File> next() throws IOException {
- if (!hasNext()) {
- throw new NoSuchElementException();
- }
- if (nestedIterator != null) {
- return nestedIterator.next();
- }
- final File f = files.next();
- String entryName = namePrefix + f.getName();
- if (f.isDirectory()) {
- entryName += "/";
- nestedIterator = new DirectoryIterator(entryName, f);
- }
- return new ChainPayload<File>(f, entryName, new Supplier<InputStream>() {
- @Override
- public InputStream get() throws IOException {
- return new FileInputStream(f);
- }
- });
- }
-
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/DirectorySink.java b/src/org/apache/commons/compress/archivers/examples/DirectorySink.java
deleted file mode 100644
index 7a34858cf69..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/DirectorySink.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * A sink that expands archive entries into a directory.
- * @since 1.17
- */
-public class DirectorySink extends Sink<ArchiveEntry> {
- private final File dir;
- private final String dirPath;
-
- /**
- * Sets up a directory as sink.
- *
- * @param dir the directory to provide entries from.
- * @throws IOException if the canonical path of the directory cannot be determined
- * @throws IllegalArgumentException if dir doesn't exist or is not a directory
- */
- public DirectorySink(File dir) throws IOException {
- if (!dir.isDirectory()) {
- throw new IllegalArgumentException("dir is not a readable directory");
- }
- this.dir = dir;
- dirPath = dir.getCanonicalPath();
- }
-
- @Override
- public void consume(ChainPayload<ArchiveEntry> payload) throws IOException, ArchiveException {
- File f = new File(dir, payload.getEntryName());
- if (!f.getCanonicalPath().startsWith(dirPath)) {
- throw new IOException("expanding " + payload.getEntryName() + " would create file outside of " + dir);
- }
- if (payload.getEntry().isDirectory()) {
- f.mkdirs();
- } else {
- f.getParentFile().mkdirs();
- try (OutputStream o = new FileOutputStream(f);
- InputStream i = payload.getInput().get()) {
- IOUtils.copy(i, o);
- }
- }
- }
-
- @Override
- public void close() {
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/Expand.java b/src/org/apache/commons/compress/archivers/examples/Expand.java
deleted file mode 100644
index d6ece27a3f7..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Expand.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Consumes archive entries and passes them to a sink, usually used to
- * expand an archive.
- * @since 1.17
- */
-public class Expand {
- /**
- * Sets up a chain of operations and consumes the entries from a source of archive entries.
- * @since 1.17
- */
- public interface ChainBuilder {
- /**
- * Adds a filter to the chain.
- * @param filter the filter to apply
- * @return an updated builder
- */
- ChainBuilder filter(Filter<ArchiveEntry> filter);
- /**
- * Adds a filter to the chain that filters out entries that cannot be read.
- * @return an updated builder
- */
- ChainBuilder skipUnreadable();
- /**
- * Adds a filter to the chain that suppresses all directory entries.
- * @return an updated builder
- */
- ChainBuilder skipDirectories();
- /**
- * Adds a transformer to the chain.
- * @param transformer transformer to apply
- * @return an updated builder
- */
- ChainBuilder map(Transformer<ArchiveEntry> transformer);
- /**
- * Adds a generic step to the chain.
- * @return an updated builder
- * @param step step to perform
- */
- ChainBuilder withStep(ChainStep<ArchiveEntry> step);
- /**
- * Actually consumes all the entries supplied.
- * @param sink sink that the entries will be sent to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the source archive cannot be read for other reasons
- */
- void to(Sink<ArchiveEntry> sink) throws IOException, ArchiveException;
- }
-
- /**
- * Sets the source of entries to process.
- * @param source the source
- * @return a builder for the chain to be created and run
- */
- public static ChainBuilder source(ArchiveEntrySource source) {
- return new Builder(source);
- }
-
- private static class Builder implements ChainBuilder {
- private final ArchiveEntrySource source;
- private ChainDefinition<ArchiveEntry> chainDef = new ChainDefinition<>();
-
- Builder(ArchiveEntrySource source) {
- this.source = source;
- }
-
- @Override
- public ChainBuilder filter(Filter<ArchiveEntry> filter) {
- return withStep(filter);
- }
- @Override
- public ChainBuilder skipUnreadable() {
- return filter(source.skipUnreadable());
- }
- @Override
- public ChainBuilder skipDirectories() {
- return filter(new Filter<ArchiveEntry>() {
- @Override
- public boolean accept(String entryName, ArchiveEntry e) {
- return !e.isDirectory();
- }
- });
- }
- @Override
- public ChainBuilder map(Transformer<ArchiveEntry> transformer) {
- return withStep(transformer);
- }
- @Override
- public ChainBuilder withStep(ChainStep<ArchiveEntry> step) {
- chainDef.add(step);
- return this;
- }
- @Override
- public void to(Sink<ArchiveEntry> sink) throws IOException, ArchiveException {
- chainDef.add(sink);
- chainDef.freeze();
- new ChainRunner(source, chainDef, sink).run();
- }
- }
-}
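
(The builder in use, mirroring ExpandCli below; archive and target paths are placeholders.)

import java.io.File;
import org.apache.commons.compress.archivers.examples.ArchiveEntrySource;
import org.apache.commons.compress.archivers.examples.ArchiveSources;
import org.apache.commons.compress.archivers.examples.DirectorySink;
import org.apache.commons.compress.archivers.examples.Expand;

public class ExpandUsage {
    public static void main(String[] args) throws Exception {
        try (ArchiveEntrySource source = ArchiveSources.forFile(new File("a.zip")).detectFormat()) {
            Expand.source(source)
                  .skipUnreadable()   // drop entries whose data cannot be read
                  .skipDirectories()  // expand regular files only
                  .to(new DirectorySink(new File("out")));
        }
    }
}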
diff --git a/src/org/apache/commons/compress/archivers/examples/ExpandCli.java b/src/org/apache/commons/compress/archivers/examples/ExpandCli.java
deleted file mode 100644
index fd264ce82c1..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ExpandCli.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Simple command line tool that extracts an archive into a directory.
- *
- * <p>Usage: ExpandCli archive dir [format]</p>
- * @since 1.17
- */
-public class ExpandCli {
-
- public static void main(String[] args) throws IOException, ArchiveException {
- if (args.length < 2 || args.length > 3) {
- System.err.println("Usage: ExpandCli dir archive [format]");
- System.exit(1);
- } else if (args.length == 2) {
- try (ArchiveEntrySource source = ArchiveSources.forFile(new File(args[0])).detectFormat()) {
- Expand.source(source).to(new DirectorySink(new File(args[1])));
- }
- } else {
- try (ArchiveEntrySource source = ArchiveSources.forFile(new File(args[0])).withFormat(args[2])) {
- Expand.source(source).to(new DirectorySink(new File(args[1])));
- }
- }
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/Expander.java b/src/org/apache/commons/compress/archivers/examples/Expander.java
deleted file mode 100644
index 5644451f66c..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Expander.java
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.Channels;
-import java.nio.channels.FileChannel;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
-import java.util.Enumeration;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.archivers.ArchiveStreamFactory;
-import org.apache.commons.compress.archivers.sevenz.SevenZFile;
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipFile;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Provides a high level API for expanding archives.
- * @since 1.17
- */
-public class Expander {
-
- private interface ArchiveEntrySupplier {
- ArchiveEntry getNextReadableEntry() throws IOException;
- }
-
- private interface EntryWriter {
- void writeEntryDataTo(ArchiveEntry entry, OutputStream out) throws IOException;
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * <p>Tries to auto-detect the archive's format.</p>
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(File archive, File targetDirectory) throws IOException, ArchiveException {
- String format = null;
- try (InputStream i = new BufferedInputStream(Files.newInputStream(archive.toPath()))) {
- format = new ArchiveStreamFactory().detect(i);
- }
- expand(format, archive, targetDirectory);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, File archive, File targetDirectory) throws IOException, ArchiveException {
- if (prefersSeekableByteChannel(format)) {
- try (SeekableByteChannel c = FileChannel.open(archive.toPath(), StandardOpenOption.READ)) {
- expand(format, c, targetDirectory);
- }
- return;
- }
- try (InputStream i = new BufferedInputStream(Files.newInputStream(archive.toPath()))) {
- expand(format, i, targetDirectory);
- }
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * <p>Tries to auto-detect the archive's format.</p>
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(InputStream archive, File targetDirectory) throws IOException, ArchiveException {
- expand(new ArchiveStreamFactory().createArchiveInputStream(archive), targetDirectory);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, InputStream archive, File targetDirectory)
- throws IOException, ArchiveException {
- expand(new ArchiveStreamFactory().createArchiveInputStream(format, archive), targetDirectory);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @param format the archive format. This uses the same format as
- * accepted by {@link ArchiveStreamFactory}.
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(String format, SeekableByteChannel archive, File targetDirectory)
- throws IOException, ArchiveException {
- if (!prefersSeekableByteChannel(format)) {
- expand(format, Channels.newInputStream(archive), targetDirectory);
- } else if (ArchiveStreamFactory.ZIP.equalsIgnoreCase(format)) {
- expand(new ZipFile(archive), targetDirectory);
- } else if (ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format)) {
- expand(new SevenZFile(archive), targetDirectory);
- } else {
- // never reached as prefersSeekableByteChannel only returns true for ZIP and 7z
- throw new ArchiveException("don't know how to handle format " + format);
- }
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(final ArchiveInputStream archive, File targetDirectory)
- throws IOException, ArchiveException {
- expand(new ArchiveEntrySupplier() {
- @Override
- public ArchiveEntry getNextReadableEntry() throws IOException {
- ArchiveEntry next = archive.getNextEntry();
- while (next != null && !archive.canReadEntryData(next)) {
- next = archive.getNextEntry();
- }
- return next;
- }
- }, new EntryWriter() {
- @Override
- public void writeEntryDataTo(ArchiveEntry entry, OutputStream out) throws IOException {
- IOUtils.copy(archive, out);
- }
- }, targetDirectory);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(final ZipFile archive, File targetDirectory)
- throws IOException, ArchiveException {
- final Enumeration<ZipArchiveEntry> entries = archive.getEntries();
- expand(new ArchiveEntrySupplier() {
- @Override
- public ArchiveEntry getNextReadableEntry() throws IOException {
- ZipArchiveEntry next = entries.hasMoreElements() ? entries.nextElement() : null;
- while (next != null && !archive.canReadEntryData(next)) {
- next = entries.hasMoreElements() ? entries.nextElement() : null;
- }
- return next;
- }
- }, new EntryWriter() {
- @Override
- public void writeEntryDataTo(ArchiveEntry entry, OutputStream out) throws IOException {
- try (InputStream in = archive.getInputStream((ZipArchiveEntry) entry)) {
- IOUtils.copy(in, out);
- }
- }
- }, targetDirectory);
- }
-
- /**
- * Expands {@code archive} into {@code targetDirectory}.
- *
- * @param archive the file to expand
- * @param targetDirectory the directory to write to
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if the archive cannot be read for other reasons
- */
- public void expand(final SevenZFile archive, File targetDirectory)
- throws IOException, ArchiveException {
- expand(new ArchiveEntrySupplier() {
- @Override
- public ArchiveEntry getNextReadableEntry() throws IOException {
- return archive.getNextEntry();
- }
- }, new EntryWriter() {
- @Override
- public void writeEntryDataTo(ArchiveEntry entry, OutputStream out) throws IOException {
- final byte[] buffer = new byte[8024];
- int n = 0;
- long count = 0;
- while (-1 != (n = archive.read(buffer))) {
- out.write(buffer, 0, n);
- count += n;
- }
- }
- }, targetDirectory);
- }
-
- private boolean prefersSeekableByteChannel(String format) {
- return ArchiveStreamFactory.ZIP.equalsIgnoreCase(format) || ArchiveStreamFactory.SEVEN_Z.equalsIgnoreCase(format);
- }
-
- private void expand(ArchiveEntrySupplier supplier, EntryWriter writer, File targetDirectory)
- throws IOException {
- String targetDirPath = targetDirectory.getCanonicalPath() + File.separatorChar;
- ArchiveEntry nextEntry = supplier.getNextReadableEntry();
- while (nextEntry != null) {
- File f = new File(targetDirectory, nextEntry.getName());
- if (!f.getCanonicalPath().startsWith(targetDirPath)) {
- throw new IOException("expanding " + nextEntry.getName()
- + " would create file outside of " + targetDirectory);
- }
- if (nextEntry.isDirectory()) {
- if (!f.isDirectory() && !f.mkdirs()) {
- throw new IOException("failed to create directory " + f);
- }
- } else {
- File parent = f.getParentFile();
- if (!parent.isDirectory() && !parent.mkdirs()) {
- throw new IOException("failed to create directory " + parent);
- }
- try (OutputStream o = Files.newOutputStream(f.toPath())) {
- writer.writeEntryDataTo(nextEntry, o);
- }
- }
- nextEntry = supplier.getNextReadableEntry();
- }
- }
-
-}
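
(The whole-file counterpart, as a sketch; paths are placeholders.)

import java.io.File;
import org.apache.commons.compress.archivers.examples.Expander;

public class ExpanderUsage {
    public static void main(String[] args) throws Exception {
        // Detects the archive format, then unpacks every readable entry under
        // targetDir, refusing entries whose names would escape that directory.
        new Expander().expand(new File("archive.tar"), new File("targetDir"));
    }
}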
diff --git a/src/org/apache/commons/compress/archivers/examples/FileFilterAdapter.java b/src/org/apache/commons/compress/archivers/examples/FileFilterAdapter.java
deleted file mode 100644
index 9f5a8464587..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/FileFilterAdapter.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.FileFilter;
-
-/**
- * @since 1.17
- */
-public class FileFilterAdapter extends Filter<File> {
- private final FileFilter filter;
- public FileFilterAdapter(FileFilter f) {
- filter = f;
- }
-
- @Override
- public boolean accept(String entryName, File entry) {
- return filter.accept(entry);
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/FileToArchiveSink.java b/src/org/apache/commons/compress/archivers/examples/FileToArchiveSink.java
deleted file mode 100644
index 7f9fa3d4776..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/FileToArchiveSink.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Sink that creates an archive from files.
- * @since 1.17
- */
-public class FileToArchiveSink extends Sink<File> {
- private final ArchiveOutputStream os;
-
- /**
- * Wraps an ArchiveOutputStream.
- *
- * @param os the stream to write to
- */
- public FileToArchiveSink(ArchiveOutputStream os) {
- this.os = os;
- }
-
- @Override
- public void consume(ChainPayload<File> payload) throws IOException, ArchiveException {
- ArchiveEntry e = os.createArchiveEntry(payload.getEntry(), payload.getEntryName());
- os.putArchiveEntry(e);
- if (!payload.getEntry().isDirectory()) {
- try (InputStream in = new BufferedInputStream(payload.getInput().get())) {
- IOUtils.copy(in, os);
- }
- }
- os.closeArchiveEntry();
- }
-
- @Override
- public void finish() throws IOException {
- os.finish();
- }
-
- @Override
- public void close() throws IOException {
- os.close();
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/Filter.java b/src/org/apache/commons/compress/archivers/examples/Filter.java
deleted file mode 100644
index 84e670cd4b7..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Filter.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Filtering stage of a {@link Expand} or {@link Archive} chain.
- * @since 1.17
- */
-public abstract class Filter<T> implements ChainStep<T> {
- /**
- * Decides whether to process an entry or not.
- *
- * @param entryName name of the entry
- * @param entry the entry
- * @return true if the entry shall be processed.
- */
- public abstract boolean accept(String entryName, T entry);
-
- @Override
- public void process(ChainPayload<T> payload, Chain<T> chain) throws IOException, ArchiveException {
- if (accept(payload.getEntryName(), payload.getEntry())) {
- chain.next(payload);
- }
- }
-}
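
(A custom filter is a single accept() implementation; a sketch that keeps only .xml entries.)

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.examples.Filter;

public class XmlOnlyFilter extends Filter<ArchiveEntry> {
    @Override
    public boolean accept(String entryName, ArchiveEntry entry) {
        // process() passes the payload down the chain only when this returns true
        return entryName.endsWith(".xml");
    }
}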
diff --git a/src/org/apache/commons/compress/archivers/examples/ListerCli.java b/src/org/apache/commons/compress/archivers/examples/ListerCli.java
deleted file mode 100644
index 36f6efab2c3..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ListerCli.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * Simple command line application that lists the contents of an archive.
- *
- *
- * <p>The name of the archive must be given as a command line argument.</p>
- * <p>The optional second argument defines the archive type, in case the format is not recognized.</p>
- *
- * @since 1.17
- */
-public final class ListerCli {
-
- public static void main(final String[] args) throws Exception {
- if (args.length == 0) {
- usage();
- return;
- }
- System.out.println("Analysing " + args[0]);
- final Sink<ArchiveEntry> sink = new Sink<ArchiveEntry>() {
- @Override
- public void consume(ChainPayload<ArchiveEntry> payload) {
- System.out.println(payload.getEntry().getName());
- }
- @Override
- public void close() {
- }
- };
-
- final File f = new File(args[0]);
- if (!f.isFile()) {
- System.err.println(f + " doesn't exist or is a directory");
- } else if (args.length == 1) {
- try (ArchiveEntrySource source = ArchiveSources.forFile(f).detectFormat()) {
- Expand.source(source).to(sink);
- }
- } else {
- try (ArchiveEntrySource source = ArchiveSources.forFile(f).withFormat(args[1])) {
- Expand.source(source).to(sink);
- }
- }
- }
-
- private static void usage() {
- System.out.println("Parameters: archive-name [archive-type]");
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/SevenZArchiveEntrySource.java b/src/org/apache/commons/compress/archivers/examples/SevenZArchiveEntrySource.java
deleted file mode 100644
index 9f38b386b87..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/SevenZArchiveEntrySource.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.channels.SeekableByteChannel;
-import java.util.NoSuchElementException;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.utils.NoCloseInputStream;
-import org.apache.commons.compress.archivers.sevenz.SevenZFile;
-
-/**
- * Supplier based on {@link SevenZFile}s.
- * @since 1.17
- */
-public class SevenZArchiveEntrySource implements ArchiveEntrySource {
-
- private final SevenZFile sf;
-
- public SevenZArchiveEntrySource(File f) throws IOException {
- this(new SevenZFile(f));
- }
-
- public SevenZArchiveEntrySource(SeekableByteChannel c) throws IOException {
- this(new SevenZFile(c));
- }
-
- public SevenZArchiveEntrySource(SevenZFile sf) {
- this.sf = sf;
- }
-
- @Override
- public ThrowingIterator<ChainPayload<ArchiveEntry>> get() throws IOException {
- return new SevenZFileIterator(sf);
- }
-
- @Override
- public void close() throws IOException {
- sf.close();
- }
-
- @Override
- public Filter<ArchiveEntry> skipUnreadable() {
- return new Filter<ArchiveEntry>() {
- @Override
- public boolean accept(String entryName, ArchiveEntry entry) {
- return true;
- }
- };
- }
-
- private static class SevenZFileIterator implements ThrowingIterator<ChainPayload<ArchiveEntry>> {
- private final SevenZFile sf;
- private ArchiveEntry nextEntry;
- private boolean nextEntryConsumed;
- SevenZFileIterator(SevenZFile sf) throws IOException {
- this.sf = sf;
- nextEntry = sf.getNextEntry();
- nextEntryConsumed = false;
- }
-
- @Override
- public boolean hasNext() throws IOException {
- if (nextEntry == null || nextEntryConsumed) {
- nextEntry = sf.getNextEntry();
- nextEntryConsumed = false;
- }
- return nextEntry != null && !nextEntryConsumed;
- }
-
- @Override
- public ChainPayload<ArchiveEntry> next() throws IOException {
- if (!hasNext()) {
- throw new NoSuchElementException();
- }
- nextEntryConsumed = true;
- return new ChainPayload<ArchiveEntry>(nextEntry, nextEntry.getName(), new Supplier<InputStream>() {
- @Override
- public InputStream get() throws IOException {
- return new SevenZFileInputStream(sf);
- }
- });
- }
-
- }
-
- private static class SevenZFileInputStream extends InputStream {
- private final SevenZFile sf;
- SevenZFileInputStream(SevenZFile sf) {
- this.sf = sf;
- }
- @Override
- public int read() throws IOException {
- return sf.read();
- }
- @Override
- public int read(byte[] b) throws IOException {
- return read(b, 0, b.length);
- }
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- return sf.read(b, off, len);
- }
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/SevenZOutputFileSink.java b/src/org/apache/commons/compress/archivers/examples/SevenZOutputFileSink.java
deleted file mode 100644
index f9a1e14e95a..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/SevenZOutputFileSink.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.channels.SeekableByteChannel;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.sevenz.SevenZOutputFile;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Sink that creates a 7z archive from files.
- * @since 1.17
- */
-public class SevenZOutputFileSink extends Sink<File> {
-
- private final SevenZOutputFile outFile;
-
- public SevenZOutputFileSink(File f) throws IOException {
- this(new SevenZOutputFile(f));
- }
-
- public SevenZOutputFileSink(SeekableByteChannel c) throws IOException {
- this(new SevenZOutputFile(c));
- }
-
- public SevenZOutputFileSink(SevenZOutputFile outFile) {
- this.outFile = outFile;
- }
-
- @Override
- public void consume(ChainPayload<File> payload) throws IOException, ArchiveException {
- ArchiveEntry e = outFile.createArchiveEntry(payload.getEntry(), payload.getEntryName());
- outFile.putArchiveEntry(e);
- if (!payload.getEntry().isDirectory()) {
- final byte[] buffer = new byte[8024];
- int n = 0;
- long count = 0;
- try (InputStream in = new BufferedInputStream(payload.getInput().get())) {
- while (-1 != (n = in.read(buffer))) {
- outFile.write(buffer, 0, n);
- count += n;
- }
- }
- }
- outFile.closeArchiveEntry();
- }
-
- @Override
- public void finish() throws IOException {
- outFile.finish();
- }
-
- @Override
- public void close() throws IOException {
- outFile.close();
- }
-}
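A hedged usage sketch (not part of this patch) of the SevenZOutputFile sequence the sink above encapsulates: create an entry, stream its bytes, close the entry, finish the archive. Class and argument names are illustrative:

    import java.io.BufferedInputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.commons.compress.archivers.ArchiveEntry;
    import org.apache.commons.compress.archivers.sevenz.SevenZOutputFile;

    public class SevenZWriteSketch {
        public static void main(String[] args) throws IOException {
            final File source = new File(args[0]);
            try (SevenZOutputFile out = new SevenZOutputFile(new File(args[1]))) {
                final ArchiveEntry e = out.createArchiveEntry(source, source.getName());
                out.putArchiveEntry(e);
                final byte[] buffer = new byte[8192];
                int n;
                try (InputStream in = new BufferedInputStream(new FileInputStream(source))) {
                    while ((n = in.read(buffer)) != -1) {
                        out.write(buffer, 0, n);
                    }
                }
                out.closeArchiveEntry();
                out.finish(); // writes the 7z end-of-archive header
            }
        }
    }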
diff --git a/src/org/apache/commons/compress/archivers/examples/Sink.java b/src/org/apache/commons/compress/archivers/examples/Sink.java
deleted file mode 100644
index 7e143696f59..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Sink.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.Closeable;
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Final stage of an {@link Expand} or {@link Archive} chain.
- * @since 1.17
- */
-public abstract class Sink<T> implements ChainStep<T>, Closeable {
- /**
- * Consume a single entry.
- *
- * @param payload the entry to consume
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if an archive format related error occurs
- */
- public abstract void consume(ChainPayload<T> payload) throws IOException, ArchiveException;
-
- /**
- * Is invoked once all entries have been processed.
- *
- *
- * <p>This implementation is empty.</p>
- *
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if an archive format related error occurs
- */
- public void finish() throws IOException, ArchiveException {
- }
-
- @Override
- public void process(ChainPayload<T> payload, Chain<T> chain) throws IOException, ArchiveException {
- consume(payload);
- }
-}
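For illustration only, assuming the generic Sink<T> signature reconstructed above: the smallest useful sink consumes metadata without ever opening the payload's content supplier. ListingSink is a hypothetical name:

    import org.apache.commons.compress.archivers.ArchiveEntry;

    class ListingSink extends Sink<ArchiveEntry> {
        @Override
        public void consume(ChainPayload<ArchiveEntry> payload) {
            // only metadata is used; payload.getInput() is never called
            System.out.println(payload.getEntryName());
        }

        @Override
        public void close() {
            // nothing to release
        }
    }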
diff --git a/src/org/apache/commons/compress/archivers/examples/Source.java b/src/org/apache/commons/compress/archivers/examples/Source.java
deleted file mode 100644
index 4a51efe237b..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Source.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.Closeable;
-
-/**
- * Describes the contract of a source for {@link Archive} or {@link Expand}.
- * @since 1.17
- */
-public interface Source<T> extends Supplier<ThrowingIterator<ChainPayload<T>>>, Closeable {
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/StreamBasedArchiveEntrySource.java b/src/org/apache/commons/compress/archivers/examples/StreamBasedArchiveEntrySource.java
deleted file mode 100644
index 19aa55b5174..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/StreamBasedArchiveEntrySource.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.NoSuchElementException;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.archivers.ArchiveStreamFactory;
-import org.apache.commons.compress.utils.NoCloseInputStream;
-
-/**
- * Supplier based on {@link ArchiveInputStream}s.
- * @since 1.17
- */
-public class StreamBasedArchiveEntrySource implements ArchiveEntrySource {
-
- private final ArchiveInputStream in;
-
- public StreamBasedArchiveEntrySource(ArchiveInputStream in) {
- this.in = in;
- }
-
- @Override
- public ThrowingIterator<ChainPayload<ArchiveEntry>> get() throws IOException {
- return new ArchiveInputStreamIterator(in);
- }
-
- @Override
- public void close() throws IOException {
- in.close();
- }
-
- @Override
- public Filter skipUnreadable() {
- return new Filter() {
- @Override
- public boolean accept(String entryName, ArchiveEntry entry) {
- return in.canReadEntryData(entry);
- }
- };
- }
-
- private static class ArchiveInputStreamIterator implements ThrowingIterator<ChainPayload<ArchiveEntry>> {
- private final ArchiveInputStream in;
- private ArchiveEntry nextEntry;
- private boolean nextEntryConsumed;
- ArchiveInputStreamIterator(ArchiveInputStream in) throws IOException {
- this.in = in;
- nextEntry = in.getNextEntry();
- nextEntryConsumed = false;
- }
-
- @Override
- public boolean hasNext() throws IOException {
- if (nextEntry == null || nextEntryConsumed) {
- nextEntry = in.getNextEntry();
- nextEntryConsumed = false;
- }
- return nextEntry != null && !nextEntryConsumed;
- }
-
- @Override
- public ChainPayload<ArchiveEntry> next() throws IOException {
- if (!hasNext()) {
- throw new NoSuchElementException();
- }
- nextEntryConsumed = true;
- return new ChainPayload(nextEntry, nextEntry.getName(), new Supplier<InputStream>() {
- @Override
- public InputStream get() throws IOException {
- return new NoCloseInputStream(in);
- }
- });
- }
-
- }
-
-}
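The iterator above uses a one-element lookahead: hasNext() fetches and caches the next entry, so calling it repeatedly is harmless, and next() hands the cached entry out exactly once. The same idiom distilled into a standalone sketch (hypothetical class, not from this patch):

    import java.io.IOException;
    import java.util.NoSuchElementException;

    abstract class LookaheadIterator<T> {
        private T next;
        private boolean consumed = true;

        /** Fetches the next element, or null at end of input. */
        protected abstract T fetch() throws IOException;

        public boolean hasNext() throws IOException {
            if (consumed) {
                next = fetch();
                if (next != null) {
                    consumed = false;
                }
            }
            return !consumed;
        }

        public T next() throws IOException {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }
            consumed = true;
            return next;
        }
    }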
diff --git a/src/org/apache/commons/compress/archivers/examples/Supplier.java b/src/org/apache/commons/compress/archivers/examples/Supplier.java
deleted file mode 100644
index aba61133d37..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Supplier.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Used inside of {@link ChainPayload} as well as {@link Archive} and {@link Expand}.
- * @since 1.17
- */
-public interface Supplier<T> {
- /**
- * Supplies the object.
- *
- * @throws IOException if an I/O error occurs
- * @throws ArchiveException if an archive format related error occurs
- * @return the asked for object
- */
- T get() throws IOException, ArchiveException;
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/ThrowingIterator.java b/src/org/apache/commons/compress/archivers/examples/ThrowingIterator.java
deleted file mode 100644
index 4a7bad8543a..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ThrowingIterator.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Specialized iterator that is allowed to throw Exceptions.
- */
-public interface ThrowingIterator<T> {
- boolean hasNext() throws IOException, ArchiveException;
- T next() throws IOException, ArchiveException;
-}
diff --git a/src/org/apache/commons/compress/archivers/examples/Transformer.java b/src/org/apache/commons/compress/archivers/examples/Transformer.java
deleted file mode 100644
index b09167854b3..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/Transformer.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.IOException;
-import org.apache.commons.compress.archivers.ArchiveException;
-
-/**
- * Transforming stage of an {@link Expand} or {@link Archive} chain.
- * @since 1.17
- */
-public abstract class Transformer<T> implements ChainStep<T> {
- /**
- * Transforms an entry.
- *
- * @param entry the entry
- * @return the transformed entry
- */
- public abstract ChainPayload<T> transform(ChainPayload<T> entry);
-
- @Override
- public void process(ChainPayload<T> payload, Chain<T> chain) throws IOException, ArchiveException {
- chain.next(transform(payload));
- }
-}
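A hypothetical transformer against this API, grounded in the ChainPayload(entry, name, supplier) constructor used by the sources in this package; the class name and the "backup/" prefix are illustrative:

    import org.apache.commons.compress.archivers.ArchiveEntry;

    class PrefixTransformer extends Transformer<ArchiveEntry> {
        @Override
        public ChainPayload<ArchiveEntry> transform(ChainPayload<ArchiveEntry> entry) {
            // rewrite the name, pass entry and content supplier through
            return new ChainPayload<>(entry.getEntry(),
                "backup/" + entry.getEntryName(), entry.getInput());
        }
    }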
diff --git a/src/org/apache/commons/compress/archivers/examples/ZipArchiveEntrySource.java b/src/org/apache/commons/compress/archivers/examples/ZipArchiveEntrySource.java
deleted file mode 100644
index d5e84bcb74e..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/ZipArchiveEntrySource.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.examples;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.channels.SeekableByteChannel;
-import java.util.Enumeration;
-import java.util.NoSuchElementException;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipFile;
-
-/**
- * Supplier based on {@link ZipFile}s.
- * @since 1.17
- */
-public class ZipArchiveEntrySource implements ArchiveEntrySource {
-
- private final ZipFile zf;
-
- public ZipArchiveEntrySource(File f) throws IOException {
- this(new ZipFile(f));
- }
-
- public ZipArchiveEntrySource(SeekableByteChannel c) throws IOException {
- this(new ZipFile(c));
- }
-
- public ZipArchiveEntrySource(ZipFile file) {
- zf = file;
- }
-
- @Override
- public ThrowingIterator<ChainPayload<ArchiveEntry>> get() throws IOException {
- return new ZipFileIterator(zf, zf.getEntries());
- }
-
- @Override
- public void close() throws IOException {
- zf.close();
- }
-
- @Override
- public Filter skipUnreadable() {
- return new Filter() {
- @Override
- public boolean accept(String entryName, ArchiveEntry entry) {
- return entry instanceof ZipArchiveEntry && zf.canReadEntryData((ZipArchiveEntry) entry);
- }
- };
- }
-
- private static class ZipFileIterator implements ThrowingIterator<ChainPayload<ArchiveEntry>> {
- private final ZipFile zf;
- private final Enumeration<ZipArchiveEntry> iter;
- ZipFileIterator(ZipFile zf, Enumeration<ZipArchiveEntry> iter) {
- this.zf = zf;
- this.iter = iter;
- }
-
- @Override
- public boolean hasNext() throws IOException {
- return iter.hasMoreElements();
- }
-
- @Override
- public ChainPayload<ArchiveEntry> next() throws IOException {
- final ZipArchiveEntry z = iter.nextElement();
- return new ChainPayload(z, z.getName(), new Supplier<InputStream>() {
- @Override
- public InputStream get() throws IOException {
- return zf.getInputStream(z);
- }
- });
- }
-
- }
-}
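A usage sketch (not part of this patch) of the ZipFile API this source wraps, including the canReadEntryData() check that skipUnreadable() is built on:

    import java.io.File;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Enumeration;
    import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
    import org.apache.commons.compress.archivers.zip.ZipFile;

    public class ZipListSketch {
        public static void main(String[] args) throws IOException {
            try (ZipFile zf = new ZipFile(new File(args[0]))) {
                final Enumeration<ZipArchiveEntry> entries = zf.getEntries();
                while (entries.hasMoreElements()) {
                    final ZipArchiveEntry z = entries.nextElement();
                    // entries using unsupported compression or encryption
                    // are reported instead of read
                    if (!zf.canReadEntryData(z)) {
                        System.out.println(z.getName() + " (unreadable, skipped)");
                        continue;
                    }
                    long size = 0;
                    final byte[] buf = new byte[8192];
                    int n;
                    try (InputStream in = zf.getInputStream(z)) {
                        while ((n = in.read(buf)) != -1) {
                            size += n;
                        }
                    }
                    System.out.println(z.getName() + " " + size);
                }
            }
        }
    }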
diff --git a/src/org/apache/commons/compress/archivers/examples/package.html b/src/org/apache/commons/compress/archivers/examples/package.html
deleted file mode 100644
index 443d5fc8fc7..00000000000
--- a/src/org/apache/commons/compress/archivers/examples/package.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<html>
-<!-- Apache License, Version 2.0 header, identical to the other deleted files -->
-<body>
-<p>Contains example code that is not guaranteed to provide a
- stable API across releases of Commons Compress.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/archivers/jar/JarArchiveEntry.java b/src/org/apache/commons/compress/archivers/jar/JarArchiveEntry.java
deleted file mode 100644
index f0c05f046a4..00000000000
--- a/src/org/apache/commons/compress/archivers/jar/JarArchiveEntry.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.jar;
-
-import java.security.cert.Certificate;
-import java.util.jar.Attributes;
-import java.util.jar.JarEntry;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipException;
-
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-
-/**
- *
- * @NotThreadSafe (parent is not thread-safe)
- */
-public class JarArchiveEntry extends ZipArchiveEntry {
-
- // These are always null - see https://issues.apache.org/jira/browse/COMPRESS-18 for discussion
- private final Attributes manifestAttributes = null;
- private final Certificate[] certificates = null;
-
- public JarArchiveEntry(final ZipEntry entry) throws ZipException {
- super(entry);
- }
-
- public JarArchiveEntry(final String name) {
- super(name);
- }
-
- public JarArchiveEntry(final ZipArchiveEntry entry) throws ZipException {
- super(entry);
- }
-
- public JarArchiveEntry(final JarEntry entry) throws ZipException {
- super(entry);
-
- }
-
- /**
- * This method is not implemented and won't ever be.
- * The JVM equivalent has a different name {@link java.util.jar.JarEntry#getAttributes()}
- *
- * @deprecated since 1.5, do not use; always returns null
- * @return Always returns null.
- */
- @Deprecated
- public Attributes getManifestAttributes() {
- return manifestAttributes;
- }
-
- /**
- * Return a copy of the list of certificates or null if there are none.
- *
- * @return Always returns null in the current implementation
- *
- * @deprecated since 1.5, not currently implemented
- */
- @Deprecated
- public Certificate[] getCertificates() {
- if (certificates != null) { // never true currently // NOSONAR
- final Certificate[] certs = new Certificate[certificates.length];
- System.arraycopy(certificates, 0, certs, 0, certs.length);
- return certs;
- }
- /*
- * Note, the method
- * Certificate[] java.util.jar.JarEntry.getCertificates()
- * also returns null or the list of certificates (but not copied)
- */
- return null;
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/jar/JarArchiveInputStream.java b/src/org/apache/commons/compress/archivers/jar/JarArchiveInputStream.java
deleted file mode 100644
index 47b1583ca48..00000000000
--- a/src/org/apache/commons/compress/archivers/jar/JarArchiveInputStream.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.jar;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
-
-/**
- * Implements an input stream that can read entries from jar files.
- *
- * @NotThreadSafe
- */
-public class JarArchiveInputStream extends ZipArchiveInputStream {
-
- /**
- * Creates an instance from the input stream using the default encoding.
- *
- * @param inputStream the input stream to wrap
- */
- public JarArchiveInputStream( final InputStream inputStream ) {
- super(inputStream);
- }
-
- /**
- * Creates an instance from the input stream using the specified encoding.
- *
- * @param inputStream the input stream to wrap
- * @param encoding the encoding to use
- * @since 1.10
- */
- public JarArchiveInputStream( final InputStream inputStream, final String encoding ) {
- super(inputStream, encoding);
- }
-
- public JarArchiveEntry getNextJarEntry() throws IOException {
- final ZipArchiveEntry entry = getNextZipEntry();
- return entry == null ? null : new JarArchiveEntry(entry);
- }
-
- @Override
- public ArchiveEntry getNextEntry() throws IOException {
- return getNextJarEntry();
- }
-
- /**
- * Checks if the signature matches what is expected for a jar file
- * (in this case it is the same as for a zip file).
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is a jar archive stream, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length ) {
- return ZipArchiveInputStream.matches(signature, length);
- }
-}
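A minimal usage sketch (not part of this patch) for the stream just removed; JarArchiveInputStream is closed via try-with-resources like any other ArchiveInputStream:

    import java.io.FileInputStream;
    import java.io.IOException;
    import org.apache.commons.compress.archivers.jar.JarArchiveEntry;
    import org.apache.commons.compress.archivers.jar.JarArchiveInputStream;

    public class JarListSketch {
        public static void main(String[] args) throws IOException {
            try (JarArchiveInputStream in =
                     new JarArchiveInputStream(new FileInputStream(args[0]))) {
                JarArchiveEntry entry;
                while ((entry = in.getNextJarEntry()) != null) {
                    System.out.println(entry.getName());
                }
            }
        }
    }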
diff --git a/src/org/apache/commons/compress/archivers/jar/JarArchiveOutputStream.java b/src/org/apache/commons/compress/archivers/jar/JarArchiveOutputStream.java
deleted file mode 100644
index 5e2c7a8e576..00000000000
--- a/src/org/apache/commons/compress/archivers/jar/JarArchiveOutputStream.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.jar;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.zip.JarMarker;
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
-
-/**
- * Subclass that adds a special extra field to the very first entry
- * which allows the created archive to be used as an executable jar on
- * Solaris.
- *
- * @NotThreadSafe
- */
-public class JarArchiveOutputStream extends ZipArchiveOutputStream {
-
- private boolean jarMarkerAdded = false;
-
- public JarArchiveOutputStream(final OutputStream out) {
- super(out);
- }
-
- /**
- * Create an instance that wraps the output stream using the provided encoding.
- *
- * @param out the output stream to wrap
- * @param encoding the encoding to use. Use null for the platform default.
- * @since 1.10
- */
- public JarArchiveOutputStream(final OutputStream out, final String encoding) {
- super(out);
- setEncoding(encoding);
- }
-
- // @throws ClassCastException if entry is not an instance of ZipArchiveEntry
- @Override
- public void putArchiveEntry(final ArchiveEntry ze) throws IOException {
- if (!jarMarkerAdded) {
- ((ZipArchiveEntry)ze).addAsFirstExtraField(JarMarker.getInstance());
- jarMarkerAdded = true;
- }
- super.putArchiveEntry(ze);
- }
-}
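A short usage sketch (not part of this patch); the first putArchiveEntry() call transparently adds the JarMarker extra field shown above, so callers write entries exactly as they would with ZipArchiveOutputStream:

    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.compress.archivers.jar.JarArchiveOutputStream;
    import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;

    public class JarWriteSketch {
        public static void main(String[] args) throws IOException {
            try (JarArchiveOutputStream out =
                     new JarArchiveOutputStream(new FileOutputStream(args[0]))) {
                final ZipArchiveEntry e = new ZipArchiveEntry("hello.txt");
                out.putArchiveEntry(e); // JarMarker is added here
                out.write("hello\n".getBytes(StandardCharsets.UTF_8));
                out.closeArchiveEntry();
                out.finish();
            }
        }
    }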
diff --git a/src/org/apache/commons/compress/archivers/jar/package.html b/src/org/apache/commons/compress/archivers/jar/package.html
deleted file mode 100644
index 09829ae6a8a..00000000000
--- a/src/org/apache/commons/compress/archivers/jar/package.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<html>
-<!-- Apache License, Version 2.0 header, identical to the other deleted files -->
-<body>
-<p>Provides stream classes for reading and writing archives using
- the ZIP format with some extensions for the special case of JAR
- archives.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/archivers/package.html b/src/org/apache/commons/compress/archivers/package.html
deleted file mode 100644
--- a/src/org/apache/commons/compress/archivers/package.html
+++ /dev/null
-<html>
-<!-- Apache License, Version 2.0 header, identical to the other deleted files -->
-<body>
-<p>Provides a unified API and factories for dealing with archives
- in different formats.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/archivers/sevenz/AES256SHA256Decoder.java b/src/org/apache/commons/compress/archivers/sevenz/AES256SHA256Decoder.java
deleted file mode 100644
index aca9777fd85..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/AES256SHA256Decoder.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.security.GeneralSecurityException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import javax.crypto.Cipher;
-import javax.crypto.CipherInputStream;
-import javax.crypto.SecretKey;
-import javax.crypto.spec.IvParameterSpec;
-import javax.crypto.spec.SecretKeySpec;
-import org.apache.commons.compress.PasswordRequiredException;
-
-class AES256SHA256Decoder extends CoderBase {
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] passwordBytes) throws IOException {
- return new InputStream() {
- private boolean isInitialized = false;
- private CipherInputStream cipherInputStream = null;
-
- private CipherInputStream init() throws IOException {
- if (isInitialized) {
- return cipherInputStream;
- }
- final int byte0 = 0xff & coder.properties[0];
- final int numCyclesPower = byte0 & 0x3f;
- final int byte1 = 0xff & coder.properties[1];
- final int ivSize = ((byte0 >> 6) & 1) + (byte1 & 0x0f);
- final int saltSize = ((byte0 >> 7) & 1) + (byte1 >> 4);
- if (2 + saltSize + ivSize > coder.properties.length) {
- throw new IOException("Salt size + IV size too long in " + archiveName);
- }
- final byte[] salt = new byte[saltSize];
- System.arraycopy(coder.properties, 2, salt, 0, saltSize);
- final byte[] iv = new byte[16];
- System.arraycopy(coder.properties, 2 + saltSize, iv, 0, ivSize);
-
- if (passwordBytes == null) {
- throw new PasswordRequiredException(archiveName);
- }
- final byte[] aesKeyBytes;
- if (numCyclesPower == 0x3f) {
- aesKeyBytes = new byte[32];
- System.arraycopy(salt, 0, aesKeyBytes, 0, saltSize);
- System.arraycopy(passwordBytes, 0, aesKeyBytes, saltSize,
- Math.min(passwordBytes.length, aesKeyBytes.length - saltSize));
- } else {
- final MessageDigest digest;
- try {
- digest = MessageDigest.getInstance("SHA-256");
- } catch (final NoSuchAlgorithmException noSuchAlgorithmException) {
- throw new IOException("SHA-256 is unsupported by your Java implementation",
- noSuchAlgorithmException);
- }
- final byte[] extra = new byte[8];
- for (long j = 0; j < (1L << numCyclesPower); j++) {
- digest.update(salt);
- digest.update(passwordBytes);
- digest.update(extra);
- for (int k = 0; k < extra.length; k++) {
- ++extra[k];
- if (extra[k] != 0) {
- break;
- }
- }
- }
- aesKeyBytes = digest.digest();
- }
-
- final SecretKey aesKey = new SecretKeySpec(aesKeyBytes, "AES");
- try {
- final Cipher cipher = Cipher.getInstance("AES/CBC/NoPadding");
- cipher.init(Cipher.DECRYPT_MODE, aesKey, new IvParameterSpec(iv));
- cipherInputStream = new CipherInputStream(in, cipher);
- isInitialized = true;
- return cipherInputStream;
- } catch (final GeneralSecurityException generalSecurityException) {
- throw new IOException("Decryption error " +
- "(do you have the JCE Unlimited Strength Jurisdiction Policy Files installed?)",
- generalSecurityException);
- }
- }
-
- @Override
- public int read() throws IOException {
- return init().read();
- }
-
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- return init().read(b, off, len);
- }
-
- @Override
- public void close() {
- }
- };
- }
-}
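The loop above is the 7z "AES-256 + SHA-256" key derivation: SHA-256 over salt || password || counter, iterated 2^numCyclesPower times (7z passwords are hashed as their UTF-16LE bytes). A standalone sketch of just that step; the helper name is hypothetical:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class SevenZKeySketch {
        static byte[] deriveKey(byte[] salt, String password, int numCyclesPower)
                throws NoSuchAlgorithmException {
            final byte[] passwordBytes = password.getBytes(StandardCharsets.UTF_16LE);
            final MessageDigest digest = MessageDigest.getInstance("SHA-256");
            final byte[] counter = new byte[8]; // little-endian loop counter
            for (long j = 0; j < (1L << numCyclesPower); j++) {
                digest.update(salt);
                digest.update(passwordBytes);
                digest.update(counter);
                for (int k = 0; k < counter.length; k++) {
                    if (++counter[k] != 0) { // same increment as above
                        break;
                    }
                }
            }
            return digest.digest(); // 32 bytes = AES-256 key
        }
    }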
diff --git a/src/org/apache/commons/compress/archivers/sevenz/Archive.java b/src/org/apache/commons/compress/archivers/sevenz/Archive.java
deleted file mode 100644
index dd1c75aa210..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/Archive.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.util.BitSet;
-
-class Archive {
- /// Offset from beginning of file + SIGNATURE_HEADER_SIZE to packed streams.
- long packPos;
- /// Size of each packed stream.
- long[] packSizes;
- /// Whether each particular packed stream has a CRC.
- BitSet packCrcsDefined;
- /// CRCs for each packed stream, valid only if that packed stream has one.
- long[] packCrcs;
- /// Properties of solid compression blocks.
- Folder[] folders;
- /// Temporary properties for non-empty files (subsumed into the files array later).
- SubStreamsInfo subStreamsInfo;
- /// The files and directories in the archive.
- SevenZArchiveEntry[] files;
- /// Mapping between folders, files and streams.
- StreamMap streamMap;
-
- @Override
- public String toString() {
- return "Archive with packed streams starting at offset " + packPos
- + ", " + lengthOf(packSizes) + " pack sizes, " + lengthOf(packCrcs)
- + " CRCs, " + lengthOf(folders) + " folders, " + lengthOf(files)
- + " files and " + streamMap;
- }
-
- private static String lengthOf(final long[] a) {
- return a == null ? "(null)" : String.valueOf(a.length);
- }
-
- private static String lengthOf(final Object[] a) {
- return a == null ? "(null)" : String.valueOf(a.length);
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/BindPair.java b/src/org/apache/commons/compress/archivers/sevenz/BindPair.java
deleted file mode 100644
index 2710b72ff43..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/BindPair.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-class BindPair {
- long inIndex;
- long outIndex;
-
- @Override
- public String toString() {
- return "BindPair binding input " + inIndex + " to output " + outIndex;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/BoundedSeekableByteChannelInputStream.java b/src/org/apache/commons/compress/archivers/sevenz/BoundedSeekableByteChannelInputStream.java
deleted file mode 100644
index 32b3bda7bd7..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/BoundedSeekableByteChannelInputStream.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.nio.channels.SeekableByteChannel;
-
-class BoundedSeekableByteChannelInputStream extends InputStream {
- private static final int MAX_BUF_LEN = 8192;
- private final ByteBuffer buffer;
- private final SeekableByteChannel channel;
- private long bytesRemaining;
-
- public BoundedSeekableByteChannelInputStream(final SeekableByteChannel channel,
- final long size) {
- this.channel = channel;
- this.bytesRemaining = size;
- if (size < MAX_BUF_LEN && size > 0) {
- buffer = ByteBuffer.allocate((int) size);
- } else {
- buffer = ByteBuffer.allocate(MAX_BUF_LEN);
- }
- }
-
- @Override
- public int read() throws IOException {
- if (bytesRemaining > 0) {
- --bytesRemaining;
- int read = read(1);
- if (read < 0) {
- return read;
- }
- return buffer.get() & 0xff;
- }
- return -1;
- }
-
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- if (bytesRemaining == 0) {
- return -1;
- }
- int bytesToRead = len;
- if (bytesToRead > bytesRemaining) {
- bytesToRead = (int) bytesRemaining;
- }
- int bytesRead;
- ByteBuffer buf;
- if (bytesToRead <= buffer.capacity()) {
- buf = buffer;
- bytesRead = read(bytesToRead);
- } else {
- buf = ByteBuffer.allocate(bytesToRead);
- bytesRead = channel.read(buf);
- buf.flip();
- }
- if (bytesRead >= 0) {
- buf.get(b, off, bytesRead);
- bytesRemaining -= bytesRead;
- }
- return bytesRead;
- }
-
- private int read(int len) throws IOException {
- buffer.rewind().limit(len);
- int read = channel.read(buffer);
- buffer.flip();
- return read;
- }
-
- @Override
- public void close() {
- // the nested channel is controlled externally
- }
-}
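The class above gives the 7z header parser a bounded view of a region of the underlying channel. The same access pattern as a standalone sketch (hypothetical helper, reads exactly one region):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SeekableByteChannel;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    public class BoundedRegionSketch {
        static byte[] readRegion(String file, long offset, int length) throws IOException {
            try (SeekableByteChannel ch =
                     Files.newByteChannel(Paths.get(file), StandardOpenOption.READ)) {
                ch.position(offset);
                final ByteBuffer buf = ByteBuffer.allocate(length);
                while (buf.hasRemaining()) {
                    if (ch.read(buf) < 0) {
                        throw new IOException("unexpected end of file");
                    }
                }
                return buf.array();
            }
        }
    }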
diff --git a/src/org/apache/commons/compress/archivers/sevenz/CLI.java b/src/org/apache/commons/compress/archivers/sevenz/CLI.java
deleted file mode 100644
index a40f2b318f4..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/CLI.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-
-public class CLI {
-
-
- private enum Mode {
- LIST("Analysing") {
- @Override
- public void takeAction(final SevenZFile archive, final SevenZArchiveEntry entry) {
- System.out.print(entry.getName());
- if (entry.isDirectory()) {
- System.out.print(" dir");
- } else {
- System.out.print(" " + entry.getCompressedSize()
- + "/" + entry.getSize());
- }
- if (entry.getHasLastModifiedDate()) {
- System.out.print(" " + entry.getLastModifiedDate());
- } else {
- System.out.print(" no last modified date");
- }
- if (!entry.isDirectory()) {
- System.out.println(" " + getContentMethods(entry));
- } else {
- System.out.println("");
- }
- }
-
- private String getContentMethods(final SevenZArchiveEntry entry) {
- final StringBuilder sb = new StringBuilder();
- boolean first = true;
- for (final SevenZMethodConfiguration m : entry.getContentMethods()) {
- if (!first) {
- sb.append(", ");
- }
- first = false;
- sb.append(m.getMethod());
- if (m.getOptions() != null) {
- sb.append("(").append(m.getOptions()).append(")");
- }
- }
- return sb.toString();
- }
- },
- EXTRACT("Extracting") {
- private final byte[] buf = new byte[8192];
- @Override
- public void takeAction(final SevenZFile archive, final SevenZArchiveEntry entry)
- throws IOException {
- final File outFile = new File(entry.getName());
- if (entry.isDirectory()) {
- if (!outFile.isDirectory() && !outFile.mkdirs()) {
- throw new IOException("Cannot create directory " + outFile);
- }
- System.out.println("created directory " + outFile);
- return;
- }
-
- System.out.println("extracting to " + outFile);
- final File parent = outFile.getParentFile();
- if (parent != null && !parent.exists() && !parent.mkdirs()) {
- throw new IOException("Cannot create " + parent);
- }
- try (final FileOutputStream fos = new FileOutputStream(outFile)) {
- final long total = entry.getSize();
- long off = 0;
- while (off < total) {
- final int toRead = (int) Math.min(total - off, buf.length);
- final int bytesRead = archive.read(buf, 0, toRead);
- if (bytesRead < 1) {
- throw new IOException("reached end of entry "
- + entry.getName()
- + " after " + off
- + " bytes, expected "
- + total);
- }
- off += bytesRead;
- fos.write(buf, 0, bytesRead);
- }
- }
- }
- };
-
- private final String message;
- Mode(final String message) {
- this.message = message;
- }
- public String getMessage() {
- return message;
- }
- public abstract void takeAction(SevenZFile archive, SevenZArchiveEntry entry)
- throws IOException;
- }
-
- public static void main(final String[] args) throws Exception {
- if (args.length == 0) {
- usage();
- return;
- }
- final Mode mode = grabMode(args);
- System.out.println(mode.getMessage() + " " + args[0]);
- final File f = new File(args[0]);
- if (!f.isFile()) {
- System.err.println(f + " doesn't exist or is a directory");
- }
- try (final SevenZFile archive = new SevenZFile(f)) {
- SevenZArchiveEntry ae;
- while((ae=archive.getNextEntry()) != null) {
- mode.takeAction(archive, ae);
- }
- }
- }
-
- private static void usage() {
- System.out.println("Parameters: archive-name [list|extract]");
- }
-
- private static Mode grabMode(final String[] args) {
- if (args.length < 2) {
- return Mode.LIST;
- }
- return Enum.valueOf(Mode.class, args[1].toUpperCase());
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/Coder.java b/src/org/apache/commons/compress/archivers/sevenz/Coder.java
deleted file mode 100644
index cbd271d1df6..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/Coder.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-class Coder {
- byte[] decompressionMethodId;
- long numInStreams;
- long numOutStreams;
- byte[] properties = null;
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/CoderBase.java b/src/org/apache/commons/compress/archivers/sevenz/CoderBase.java
deleted file mode 100644
index 38425139527..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/CoderBase.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-/**
- * Base Codec class.
- */
-abstract class CoderBase {
- private final Class<?>[] acceptableOptions;
- private static final byte[] NONE = new byte[0];
-
- /**
- * @param acceptableOptions types that can be used as options for this codec.
- */
- protected CoderBase(final Class<?>... acceptableOptions) {
- this.acceptableOptions = acceptableOptions;
- }
-
- /**
- * @return whether this method can extract options from the given object.
- */
- boolean canAcceptOptions(final Object opts) {
- for (final Class<?> c : acceptableOptions) {
- if (c.isInstance(opts)) {
- return true;
- }
- }
- return false;
- }
-
- /**
- * @return property-bytes to write in a Folder block
- */
- byte[] getOptionsAsProperties(final Object options) throws IOException {
- return NONE;
- }
-
- /**
- * @return configuration options that have been used to create the given InputStream from the given Coder
- */
- Object getOptionsFromCoder(final Coder coder, final InputStream in) throws IOException {
- return null;
- }
-
- /**
- * @return a stream that reads from in using the configured coder and password.
- */
- abstract InputStream decode(final String archiveName,
- final InputStream in, long uncompressedLength,
- final Coder coder, byte[] password) throws IOException;
-
- /**
- * @return a stream that writes to out using the given configuration.
- */
- OutputStream encode(final OutputStream out, final Object options) throws IOException {
- throw new UnsupportedOperationException("method doesn't support writing");
- }
-
- /**
- * If the option represents a number, return its integer
- * value, otherwise return the given default value.
- */
- protected static int numberOptionOrDefault(final Object options, final int defaultValue) {
- return options instanceof Number ? ((Number) options).intValue() : defaultValue;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/Coders.java b/src/org/apache/commons/compress/archivers/sevenz/Coders.java
deleted file mode 100644
index 729a1a3d8d3..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/Coders.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.SequenceInputStream;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.zip.Deflater;
-import java.util.zip.DeflaterOutputStream;
-import java.util.zip.Inflater;
-import java.util.zip.InflaterInputStream;
-
-import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
-import org.apache.commons.compress.compressors.deflate64.Deflate64CompressorInputStream;
-import org.apache.commons.compress.utils.FlushShieldFilterOutputStream;
-import org.tukaani.xz.ARMOptions;
-import org.tukaani.xz.ARMThumbOptions;
-import org.tukaani.xz.FilterOptions;
-import org.tukaani.xz.FinishableWrapperOutputStream;
-import org.tukaani.xz.IA64Options;
-import org.tukaani.xz.PowerPCOptions;
-import org.tukaani.xz.SPARCOptions;
-import org.tukaani.xz.X86Options;
-
-class Coders {
- private static final Map<SevenZMethod, CoderBase> CODER_MAP = new HashMap<SevenZMethod, CoderBase>() {
-
- private static final long serialVersionUID = 1664829131806520867L;
- {
- put(SevenZMethod.COPY, new CopyDecoder());
- put(SevenZMethod.LZMA, new LZMADecoder());
- put(SevenZMethod.LZMA2, new LZMA2Decoder());
- put(SevenZMethod.DEFLATE, new DeflateDecoder());
- put(SevenZMethod.DEFLATE64, new Deflate64Decoder());
- put(SevenZMethod.BZIP2, new BZIP2Decoder());
- put(SevenZMethod.AES256SHA256, new AES256SHA256Decoder());
- put(SevenZMethod.BCJ_X86_FILTER, new BCJDecoder(new X86Options()));
- put(SevenZMethod.BCJ_PPC_FILTER, new BCJDecoder(new PowerPCOptions()));
- put(SevenZMethod.BCJ_IA64_FILTER, new BCJDecoder(new IA64Options()));
- put(SevenZMethod.BCJ_ARM_FILTER, new BCJDecoder(new ARMOptions()));
- put(SevenZMethod.BCJ_ARM_THUMB_FILTER, new BCJDecoder(new ARMThumbOptions()));
- put(SevenZMethod.BCJ_SPARC_FILTER, new BCJDecoder(new SPARCOptions()));
- put(SevenZMethod.DELTA_FILTER, new DeltaDecoder());
- }};
-
- static CoderBase findByMethod(final SevenZMethod method) {
- return CODER_MAP.get(method);
- }
-
- static InputStream addDecoder(final String archiveName, final InputStream is, final long uncompressedLength,
- final Coder coder, final byte[] password) throws IOException {
- final CoderBase cb = findByMethod(SevenZMethod.byId(coder.decompressionMethodId));
- if (cb == null) {
- throw new IOException("Unsupported compression method " +
- Arrays.toString(coder.decompressionMethodId)
- + " used in " + archiveName);
- }
- return cb.decode(archiveName, is, uncompressedLength, coder, password);
- }
-
- static OutputStream addEncoder(final OutputStream out, final SevenZMethod method,
- final Object options) throws IOException {
- final CoderBase cb = findByMethod(method);
- if (cb == null) {
- throw new IOException("Unsupported compression method " + method);
- }
- return cb.encode(out, options);
- }
-
- static class CopyDecoder extends CoderBase {
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] password) throws IOException {
- return in;
- }
- @Override
- OutputStream encode(final OutputStream out, final Object options) {
- return out;
- }
- }
-
- static class BCJDecoder extends CoderBase {
- private final FilterOptions opts;
- BCJDecoder(final FilterOptions opts) {
- this.opts = opts;
- }
-
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] password) throws IOException {
- try {
- return opts.getInputStream(in);
- } catch (final AssertionError e) {
- throw new IOException("BCJ filter used in " + archiveName
- + " needs XZ for Java > 1.4 - see "
- + "https://commons.apache.org/proper/commons-compress/limitations.html#7Z",
- e);
- }
- }
-
- @SuppressWarnings("resource")
- @Override
- OutputStream encode(final OutputStream out, final Object options) {
- return new FlushShieldFilterOutputStream(opts.getOutputStream(new FinishableWrapperOutputStream(out)));
- }
- }
-
- static class DeflateDecoder extends CoderBase {
- private static final byte[] ONE_ZERO_BYTE = new byte[1];
- DeflateDecoder() {
- super(Number.class);
- }
-
- @SuppressWarnings("resource") // caller must close the InputStream
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] password)
- throws IOException {
- final Inflater inflater = new Inflater(true);
- // Inflater with nowrap=true has this odd contract for a zero padding
- // byte following the data stream; this used to be zlib's requirement
- // and has been fixed a long time ago, but the contract persists so
- // we comply.
- // https://docs.oracle.com/javase/7/docs/api/java/util/zip/Inflater.html#Inflater(boolean)
- final InflaterInputStream inflaterInputStream = new InflaterInputStream(new SequenceInputStream(in,
- new ByteArrayInputStream(ONE_ZERO_BYTE)), inflater);
- return new DeflateDecoderInputStream(inflaterInputStream, inflater);
- }
- @Override
- OutputStream encode(final OutputStream out, final Object options) {
- final int level = numberOptionOrDefault(options, 9);
- final Deflater deflater = new Deflater(level, true);
- final DeflaterOutputStream deflaterOutputStream = new DeflaterOutputStream(out, deflater);
- return new DeflateDecoderOutputStream(deflaterOutputStream, deflater);
- }
-
- static class DeflateDecoderInputStream extends InputStream {
-
- InflaterInputStream inflaterInputStream;
- Inflater inflater;
-
- public DeflateDecoderInputStream(InflaterInputStream inflaterInputStream,
- Inflater inflater) {
- this.inflaterInputStream = inflaterInputStream;
- this.inflater = inflater;
- }
-
- @Override
- public int read() throws IOException {
- return inflaterInputStream.read();
- }
-
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- return inflaterInputStream.read(b, off, len);
- }
-
- @Override
- public int read(final byte[] b) throws IOException {
- return inflaterInputStream.read(b);
- }
-
- @Override
- public void close() throws IOException {
- try {
- inflaterInputStream.close();
- } finally {
- inflater.end();
- }
- }
- }
-
- static class DeflateDecoderOutputStream extends OutputStream {
-
- DeflaterOutputStream deflaterOutputStream;
- Deflater deflater;
-
- public DeflateDecoderOutputStream(DeflaterOutputStream deflaterOutputStream,
- Deflater deflater) {
- this.deflaterOutputStream = deflaterOutputStream;
- this.deflater = deflater;
- }
-
- @Override
- public void write(final int b) throws IOException {
- deflaterOutputStream.write(b);
- }
-
- @Override
- public void write(final byte[] b) throws IOException {
- deflaterOutputStream.write(b);
- }
-
- @Override
- public void write(final byte[] b, final int off, final int len) throws IOException {
- deflaterOutputStream.write(b, off, len);
- }
-
- @Override
- public void close() throws IOException {
- try {
- deflaterOutputStream.close();
- } finally {
- deflater.end();
- }
- }
- }
- }
-
- static class Deflate64Decoder extends CoderBase {
- Deflate64Decoder() {
- super(Number.class);
- }
-
- @SuppressWarnings("resource") // caller must close the InputStream
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] password)
- throws IOException {
- return new Deflate64CompressorInputStream(in);
- }
- }
-
- static class BZIP2Decoder extends CoderBase {
- BZIP2Decoder() {
- super(Number.class);
- }
-
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] password)
- throws IOException {
- return new BZip2CompressorInputStream(in);
- }
- @Override
- OutputStream encode(final OutputStream out, final Object options)
- throws IOException {
- final int blockSize = numberOptionOrDefault(options, BZip2CompressorOutputStream.MAX_BLOCKSIZE);
- return new BZip2CompressorOutputStream(out, blockSize);
- }
- }
-
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/DeltaDecoder.java b/src/org/apache/commons/compress/archivers/sevenz/DeltaDecoder.java
deleted file mode 100644
index bc58c636d74..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/DeltaDecoder.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import org.tukaani.xz.DeltaOptions;
-import org.tukaani.xz.FinishableWrapperOutputStream;
-import org.tukaani.xz.UnsupportedOptionsException;
-
-class DeltaDecoder extends CoderBase {
- DeltaDecoder() {
- super(Number.class);
- }
-
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] password) throws IOException {
- return new DeltaOptions(getOptionsFromCoder(coder)).getInputStream(in);
- }
-
- @SuppressWarnings("resource")
- @Override
- OutputStream encode(final OutputStream out, final Object options) throws IOException {
- final int distance = numberOptionOrDefault(options, 1);
- try {
- return new DeltaOptions(distance).getOutputStream(new FinishableWrapperOutputStream(out));
- } catch (final UnsupportedOptionsException ex) {
- throw new IOException(ex.getMessage());
- }
- }
-
- @Override
- byte[] getOptionsAsProperties(final Object options) {
- return new byte[] {
- (byte) (numberOptionOrDefault(options, 1) - 1)
- };
- }
-
- @Override
- Object getOptionsFromCoder(final Coder coder, final InputStream in) {
- return getOptionsFromCoder(coder);
- }
-
- private int getOptionsFromCoder(final Coder coder) {
- if (coder.properties == null || coder.properties.length == 0) {
- return 1;
- }
- return (0xff & coder.properties[0]) + 1;
- }
-}
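The delta filter's single property byte stores distance - 1, so distances 1..256 map to 0x00..0xFF. Both directions of that mapping as a tiny sketch (hypothetical class, mirrors getOptionsAsProperties/getOptionsFromCoder above):

    public class DeltaPropertySketch {
        static byte distanceToProperty(int distance) {
            return (byte) (distance - 1);
        }

        static int propertyToDistance(byte[] properties) {
            // missing or empty properties default to distance 1
            return properties == null || properties.length == 0
                    ? 1 : (0xff & properties[0]) + 1;
        }
    }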
diff --git a/src/org/apache/commons/compress/archivers/sevenz/Folder.java b/src/org/apache/commons/compress/archivers/sevenz/Folder.java
deleted file mode 100644
index 128cba928e3..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/Folder.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.util.LinkedList;
-
-/**
- * The unit of solid compression.
- */
-class Folder {
- /// List of coders used in this folder, e.g. one for compression, one for encryption.
- Coder[] coders;
- /// Total number of input streams across all coders.
- /// This field is currently unused but technically part of the 7z API.
- long totalInputStreams;
- /// Total number of output streams across all coders.
- long totalOutputStreams;
- /// Mapping between input and output streams.
- BindPair[] bindPairs;
- /// Indices of input streams, one per input stream not listed in bindPairs.
- long[] packedStreams;
- /// Unpack sizes, per each output stream.
- long[] unpackSizes;
- /// Whether the folder has a CRC.
- boolean hasCrc;
- /// The CRC, if present.
- long crc;
- /// The number of unpack substreams, product of the number of
- /// output streams and the number of non-empty files in this
- /// folder.
- int numUnpackSubStreams;
-
- /**
- * Sorts Coders using bind pairs.
- * <p>The first coder reads from the packed stream (we currently
- * only support single input stream decoders), the second reads
- * from the output of the first and so on.
- */
- Iterable<Coder> getOrderedCoders() {
- final LinkedList<Coder> l = new LinkedList<>();
- int current = (int) packedStreams[0]; // more than 2^31 coders?
- while (current != -1) {
- l.addLast(coders[current]);
- final int pair = findBindPairForOutStream(current);
- current = pair != -1 ? (int) bindPairs[pair].inIndex : -1;
- }
- return l;
- }
-
- int findBindPairForInStream(final int index) {
- for (int i = 0; i < bindPairs.length; i++) {
- if (bindPairs[i].inIndex == index) {
- return i;
- }
- }
- return -1;
- }
-
- int findBindPairForOutStream(final int index) {
- for (int i = 0; i < bindPairs.length; i++) {
- if (bindPairs[i].outIndex == index) {
- return i;
- }
- }
- return -1;
- }
-
- long getUnpackSize() {
- if (totalOutputStreams == 0) {
- return 0;
- }
- for (int i = ((int)totalOutputStreams) - 1; i >= 0; i--) {
- if (findBindPairForOutStream(i) < 0) {
- return unpackSizes[i];
- }
- }
- return 0;
- }
-
- long getUnpackSizeForCoder(final Coder coder) {
- if (coders != null) {
- for (int i = 0; i < coders.length; i++) {
- if (coders[i] == coder) {
- return unpackSizes[i];
- }
- }
- }
- return 0;
- }
-
- @Override
- public String toString() {
- return "Folder with " + coders.length + " coders, " + totalInputStreams
- + " input streams, " + totalOutputStreams + " output streams, "
- + bindPairs.length + " bind pairs, " + packedStreams.length
- + " packed streams, " + unpackSizes.length + " unpack sizes, "
- + (hasCrc ? "with CRC " + crc : "without CRC")
- + " and " + numUnpackSubStreams + " unpack streams";
- }
-}
-
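
getOrderedCoders() above is the crux of this class: start at the first packed stream, then repeatedly follow the bind pair that consumes the current coder's output. A self-contained sketch of that walk, with plain int arrays standing in for the package-private Coder and BindPair types (valid only for single-input/single-output coders, which is all the reader supports):

import java.util.ArrayList;
import java.util.List;

public class CoderChainWalk {
    // bindIn[i] / bindOut[i] mirror BindPair.inIndex / BindPair.outIndex.
    static List<Integer> orderedCoders(final int firstPackedStream,
                                       final int[] bindIn, final int[] bindOut) {
        final List<Integer> order = new ArrayList<>();
        int current = firstPackedStream;
        while (current != -1) {
            order.add(current);
            // findBindPairForOutStream(current): which pair consumes this output?
            int pair = -1;
            for (int i = 0; i < bindOut.length; i++) {
                if (bindOut[i] == current) {
                    pair = i;
                    break;
                }
            }
            current = pair != -1 ? bindIn[pair] : -1; // -1 terminates the chain
        }
        return order;
    }

    public static void main(String[] args) {
        // Coder 0 (e.g. LZMA2) feeds coder 1 (e.g. delta): one bind pair, out 0 -> in 1.
        System.out.println(orderedCoders(0, new int[] {1}, new int[] {0})); // prints [0, 1]
    }
}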
diff --git a/src/org/apache/commons/compress/archivers/sevenz/LZMA2Decoder.java b/src/org/apache/commons/compress/archivers/sevenz/LZMA2Decoder.java
deleted file mode 100644
index 0f13ca95c73..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/LZMA2Decoder.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import org.tukaani.xz.FinishableWrapperOutputStream;
-import org.tukaani.xz.FinishableOutputStream;
-import org.tukaani.xz.LZMA2InputStream;
-import org.tukaani.xz.LZMA2Options;
-
-class LZMA2Decoder extends CoderBase {
- LZMA2Decoder() {
- super(LZMA2Options.class, Number.class);
- }
-
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] password) throws IOException {
- try {
- final int dictionarySize = getDictionarySize(coder);
- return new LZMA2InputStream(in, dictionarySize);
- } catch (final IllegalArgumentException ex) {
- throw new IOException(ex.getMessage());
- }
- }
-
- @Override
- OutputStream encode(final OutputStream out, final Object opts)
- throws IOException {
- final LZMA2Options options = getOptions(opts);
- final FinishableOutputStream wrapped = new FinishableWrapperOutputStream(out);
- return options.getOutputStream(wrapped);
- }
-
- @Override
- byte[] getOptionsAsProperties(final Object opts) {
- final int dictSize = getDictSize(opts);
- final int lead = Integer.numberOfLeadingZeros(dictSize);
- final int secondBit = (dictSize >>> (30 - lead)) - 2;
- return new byte[] {
- (byte) ((19 - lead) * 2 + secondBit)
- };
- }
-
- @Override
- Object getOptionsFromCoder(final Coder coder, final InputStream in) {
- return getDictionarySize(coder);
- }
-
- private int getDictSize(final Object opts) {
- if (opts instanceof LZMA2Options) {
- return ((LZMA2Options) opts).getDictSize();
- }
- return numberOptionOrDefault(opts);
- }
-
- private int getDictionarySize(final Coder coder) throws IllegalArgumentException {
- final int dictionarySizeBits = 0xff & coder.properties[0];
- if ((dictionarySizeBits & (~0x3f)) != 0) {
- throw new IllegalArgumentException("Unsupported LZMA2 property bits");
- }
- if (dictionarySizeBits > 40) {
- throw new IllegalArgumentException("Dictionary larger than 4GiB maximum size");
- }
- if (dictionarySizeBits == 40) {
- return 0xFFFFffff;
- }
- return (2 | (dictionarySizeBits & 0x1)) << (dictionarySizeBits / 2 + 11);
- }
-
- private LZMA2Options getOptions(final Object opts) throws IOException {
- if (opts instanceof LZMA2Options) {
- return (LZMA2Options) opts;
- }
- final LZMA2Options options = new LZMA2Options();
- options.setDictSize(numberOptionOrDefault(opts));
- return options;
- }
-
- private int numberOptionOrDefault(final Object opts) {
- return numberOptionOrDefault(opts, LZMA2Options.DICT_SIZE_DEFAULT);
- }
-}
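
The dictionary-size property above is a compact logarithmic encoding: a single byte b decodes to (2 | (b & 1)) << (b / 2 + 11), i.e. alternating powers of two and three-times-powers-of-two from 4 KiB upward, with 40 reserved for the 4 GiB maximum. A sketch verifying that the two conversions in the removed class are inverses over all 40 encodable sizes (class and method names are mine):

public class Lzma2DictSizeProperty {
    static int encode(final int dictSize) {
        // Same arithmetic as getOptionsAsProperties() above.
        final int lead = Integer.numberOfLeadingZeros(dictSize);
        final int secondBit = (dictSize >>> (30 - lead)) - 2;
        return (19 - lead) * 2 + secondBit;
    }

    static int decode(final int bits) {
        // Same arithmetic as getDictionarySize() above.
        if (bits > 40) {
            throw new IllegalArgumentException("Dictionary larger than 4GiB maximum size");
        }
        if (bits == 40) {
            return 0xFFFFffff;
        }
        return (2 | (bits & 0x1)) << (bits / 2 + 11);
    }

    public static void main(String[] args) {
        for (int bits = 0; bits < 40; bits++) {
            if (encode(decode(bits)) != bits) {
                throw new AssertionError("round trip failed at bits=" + bits);
            }
        }
        System.out.println("dictionary-size properties 0..39 round-trip");
    }
}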
diff --git a/src/org/apache/commons/compress/archivers/sevenz/LZMADecoder.java b/src/org/apache/commons/compress/archivers/sevenz/LZMADecoder.java
deleted file mode 100644
index 6e3d46ccc25..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/LZMADecoder.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import org.apache.commons.compress.utils.ByteUtils;
-import org.apache.commons.compress.utils.FlushShieldFilterOutputStream;
-import org.tukaani.xz.LZMA2Options;
-import org.tukaani.xz.LZMAInputStream;
-import org.tukaani.xz.LZMAOutputStream;
-
-class LZMADecoder extends CoderBase {
- LZMADecoder() {
- super(LZMA2Options.class, Number.class);
- }
-
- @Override
- InputStream decode(final String archiveName, final InputStream in, final long uncompressedLength,
- final Coder coder, final byte[] password) throws IOException {
- final byte propsByte = coder.properties[0];
- final int dictSize = getDictionarySize(coder);
- if (dictSize > LZMAInputStream.DICT_SIZE_MAX) {
- throw new IOException("Dictionary larger than 4GiB maximum size used in " + archiveName);
- }
- return new LZMAInputStream(in, uncompressedLength, propsByte, dictSize);
- }
-
- @SuppressWarnings("resource")
- @Override
- OutputStream encode(final OutputStream out, final Object opts)
- throws IOException {
- // NOOP as LZMAOutputStream throws an exception in flush
- return new FlushShieldFilterOutputStream(new LZMAOutputStream(out, getOptions(opts), false));
- }
-
- @Override
- byte[] getOptionsAsProperties(final Object opts) throws IOException {
- final LZMA2Options options = getOptions(opts);
- final byte props = (byte) ((options.getPb() * 5 + options.getLp()) * 9 + options.getLc());
- int dictSize = options.getDictSize();
- byte[] o = new byte[5];
- o[0] = props;
- ByteUtils.toLittleEndian(o, dictSize, 1, 4);
- return o;
- }
-
- @Override
- Object getOptionsFromCoder(final Coder coder, final InputStream in) throws IOException {
- final byte propsByte = coder.properties[0];
- int props = propsByte & 0xFF;
- int pb = props / (9 * 5);
- props -= pb * 9 * 5;
- int lp = props / 9;
- int lc = props - lp * 9;
- LZMA2Options opts = new LZMA2Options();
- opts.setPb(pb);
- opts.setLcLp(lc, lp);
- opts.setDictSize(getDictionarySize(coder));
- return opts;
- }
-
- private int getDictionarySize(final Coder coder) throws IllegalArgumentException {
- return (int) ByteUtils.fromLittleEndian(coder.properties, 1, 4);
- }
-
- private LZMA2Options getOptions(final Object opts) throws IOException {
- if (opts instanceof LZMA2Options) {
- return (LZMA2Options) opts;
- }
- final LZMA2Options options = new LZMA2Options();
- options.setDictSize(numberOptionOrDefault(opts));
- return options;
- }
-
- private int numberOptionOrDefault(final Object opts) {
- return numberOptionOrDefault(opts, LZMA2Options.DICT_SIZE_DEFAULT);
- }
-}
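
The props byte handled above packs all three LZMA literal/position parameters into one byte as (pb * 5 + lp) * 9 + lc; getOptionsFromCoder() simply divides them back out. A standalone sketch of that packing (names are mine, not part of the removed code):

public class LzmaPropsByte {
    static int encode(final int lc, final int lp, final int pb) {
        return (pb * 5 + lp) * 9 + lc; // same formula as getOptionsAsProperties() above
    }

    static int[] decode(final int propsByte) {
        int props = propsByte & 0xFF;
        final int pb = props / (9 * 5);
        props -= pb * 9 * 5;
        final int lp = props / 9;
        final int lc = props - lp * 9;
        return new int[] {lc, lp, pb};
    }

    public static void main(String[] args) {
        // The default LZMA parameters lc=3, lp=0, pb=2 encode to the classic 0x5D.
        final int props = encode(3, 0, 2);
        System.out.println(props); // 93
        final int[] back = decode(props);
        System.out.println(back[0] + " " + back[1] + " " + back[2]); // 3 0 2
    }
}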
diff --git a/src/org/apache/commons/compress/archivers/sevenz/NID.java b/src/org/apache/commons/compress/archivers/sevenz/NID.java
deleted file mode 100644
index 89a813a2a19..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/NID.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-final class NID {
- public static final int kEnd = 0x00;
- public static final int kHeader = 0x01;
- public static final int kArchiveProperties = 0x02;
- public static final int kAdditionalStreamsInfo = 0x03;
- public static final int kMainStreamsInfo = 0x04;
- public static final int kFilesInfo = 0x05;
- public static final int kPackInfo = 0x06;
- public static final int kUnpackInfo = 0x07;
- public static final int kSubStreamsInfo = 0x08;
- public static final int kSize = 0x09;
- public static final int kCRC = 0x0A;
- public static final int kFolder = 0x0B;
- public static final int kCodersUnpackSize = 0x0C;
- public static final int kNumUnpackStream = 0x0D;
- public static final int kEmptyStream = 0x0E;
- public static final int kEmptyFile = 0x0F;
- public static final int kAnti = 0x10;
- public static final int kName = 0x11;
- public static final int kCTime = 0x12;
- public static final int kATime = 0x13;
- public static final int kMTime = 0x14;
- public static final int kWinAttributes = 0x15;
- public static final int kComment = 0x16;
- public static final int kEncodedHeader = 0x17;
- public static final int kStartPos = 0x18;
- public static final int kDummy = 0x19;
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/SevenZArchiveEntry.java b/src/org/apache/commons/compress/archivers/sevenz/SevenZArchiveEntry.java
deleted file mode 100644
index f95426b652b..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/SevenZArchiveEntry.java
+++ /dev/null
@@ -1,515 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.util.Calendar;
-import java.util.Collections;
-import java.util.Date;
-import java.util.LinkedList;
-import java.util.TimeZone;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * An entry in a 7z archive.
- *
- * @NotThreadSafe
- * @since 1.6
- */
-public class SevenZArchiveEntry implements ArchiveEntry {
- private String name;
- private boolean hasStream;
- private boolean isDirectory;
- private boolean isAntiItem;
- private boolean hasCreationDate;
- private boolean hasLastModifiedDate;
- private boolean hasAccessDate;
- private long creationDate;
- private long lastModifiedDate;
- private long accessDate;
- private boolean hasWindowsAttributes;
- private int windowsAttributes;
- private boolean hasCrc;
- private long crc, compressedCrc;
- private long size, compressedSize;
- private Iterable<? extends SevenZMethodConfiguration> contentMethods;
-
- public SevenZArchiveEntry() {
- }
-
- /**
- * Get this entry's name.
- *
- * <p>This method returns the raw name as it is stored inside of the archive.
- *
- * @return This entry's name.
- */
- @Override
- public String getName() {
- return name;
- }
-
- /**
- * Set this entry's name.
- *
- * @param name This entry's new name.
- */
- public void setName(final String name) {
- this.name = name;
- }
-
- /**
- * Whether there is any content associated with this entry.
- * @return whether there is any content associated with this entry.
- */
- public boolean hasStream() {
- return hasStream;
- }
-
- /**
- * Sets whether there is any content associated with this entry.
- * @param hasStream whether there is any content associated with this entry.
- */
- public void setHasStream(final boolean hasStream) {
- this.hasStream = hasStream;
- }
-
- /**
- * Return whether or not this entry represents a directory.
- *
- * @return True if this entry is a directory.
- */
- @Override
- public boolean isDirectory() {
- return isDirectory;
- }
-
- /**
- * Sets whether or not this entry represents a directory.
- *
- * @param isDirectory True if this entry is a directory.
- */
- public void setDirectory(final boolean isDirectory) {
- this.isDirectory = isDirectory;
- }
-
- /**
- * Indicates whether this is an "anti-item" used in differential backups,
- * meaning it should delete the same file from a previous backup.
- * @return true if it is an anti-item, false otherwise
- */
- public boolean isAntiItem() {
- return isAntiItem;
- }
-
- /**
- * Sets whether this is an "anti-item" used in differential backups,
- * meaning it should delete the same file from a previous backup.
- * @param isAntiItem true if it is an anti-item, false otherwise
- */
- public void setAntiItem(final boolean isAntiItem) {
- this.isAntiItem = isAntiItem;
- }
-
- /**
- * Returns whether this entry has got a creation date at all.
- * @return whether the entry has got a creation date
- */
- public boolean getHasCreationDate() {
- return hasCreationDate;
- }
-
- /**
- * Sets whether this entry has got a creation date at all.
- * @param hasCreationDate whether the entry has got a creation date
- */
- public void setHasCreationDate(final boolean hasCreationDate) {
- this.hasCreationDate = hasCreationDate;
- }
-
- /**
- * Gets the creation date.
- * @throws UnsupportedOperationException if the entry hasn't got a
- * creation date.
- * @return the creation date
- */
- public Date getCreationDate() {
- if (hasCreationDate) {
- return ntfsTimeToJavaTime(creationDate);
- }
- throw new UnsupportedOperationException(
- "The entry doesn't have this timestamp");
- }
-
- /**
- * Sets the creation date using NTFS time (100 nanosecond units
- * since 1 January 1601)
- * @param ntfsCreationDate the creation date
- */
- public void setCreationDate(final long ntfsCreationDate) {
- this.creationDate = ntfsCreationDate;
- }
-
- /**
- * Sets the creation date.
- * @param creationDate the creation date
- */
- public void setCreationDate(final Date creationDate) {
- hasCreationDate = creationDate != null;
- if (hasCreationDate) {
- this.creationDate = javaTimeToNtfsTime(creationDate);
- }
- }
-
- /**
- * Returns whether this entry has got a last modified date at all.
- * @return whether this entry has got a last modified date at all
- */
- public boolean getHasLastModifiedDate() {
- return hasLastModifiedDate;
- }
-
- /**
- * Sets whether this entry has got a last modified date at all.
- * @param hasLastModifiedDate whether this entry has got a last
- * modified date at all
- */
- public void setHasLastModifiedDate(final boolean hasLastModifiedDate) {
- this.hasLastModifiedDate = hasLastModifiedDate;
- }
-
- /**
- * Gets the last modified date.
- * @throws UnsupportedOperationException if the entry hasn't got a
- * last modified date.
- * @return the last modified date
- */
- @Override
- public Date getLastModifiedDate() {
- if (hasLastModifiedDate) {
- return ntfsTimeToJavaTime(lastModifiedDate);
- }
- throw new UnsupportedOperationException(
- "The entry doesn't have this timestamp");
- }
-
- /**
- * Sets the last modified date using NTFS time (100 nanosecond
- * units since 1 January 1601)
- * @param ntfsLastModifiedDate the last modified date
- */
- public void setLastModifiedDate(final long ntfsLastModifiedDate) {
- this.lastModifiedDate = ntfsLastModifiedDate;
- }
-
- /**
- * Sets the last modified date.
- * @param lastModifiedDate the last modified date
- */
- public void setLastModifiedDate(final Date lastModifiedDate) {
- hasLastModifiedDate = lastModifiedDate != null;
- if (hasLastModifiedDate) {
- this.lastModifiedDate = javaTimeToNtfsTime(lastModifiedDate);
- }
- }
-
- /**
- * Returns whether this entry has got an access date at all.
- * @return whether this entry has got an access date at all.
- */
- public boolean getHasAccessDate() {
- return hasAccessDate;
- }
-
- /**
- * Sets whether this entry has got an access date at all.
- * @param hasAccessDate whether this entry has got an access date at all.
- */
- public void setHasAccessDate(final boolean hasAccessDate) {
- this.hasAccessDate = hasAccessDate;
- }
-
- /**
- * Gets the access date.
- * @throws UnsupportedOperationException if the entry hasn't got an
- * access date.
- * @return the access date
- */
- public Date getAccessDate() {
- if (hasAccessDate) {
- return ntfsTimeToJavaTime(accessDate);
- }
- throw new UnsupportedOperationException(
- "The entry doesn't have this timestamp");
- }
-
- /**
- * Sets the access date using NTFS time (100 nanosecond units
- * since 1 January 1601)
- * @param ntfsAccessDate the access date
- */
- public void setAccessDate(final long ntfsAccessDate) {
- this.accessDate = ntfsAccessDate;
- }
-
- /**
- * Sets the access date.
- * @param accessDate the access date
- */
- public void setAccessDate(final Date accessDate) {
- hasAccessDate = accessDate != null;
- if (hasAccessDate) {
- this.accessDate = javaTimeToNtfsTime(accessDate);
- }
- }
-
- /**
- * Returns whether this entry has windows attributes.
- * @return whether this entry has windows attributes.
- */
- public boolean getHasWindowsAttributes() {
- return hasWindowsAttributes;
- }
-
- /**
- * Sets whether this entry has windows attributes.
- * @param hasWindowsAttributes whether this entry has windows attributes.
- */
- public void setHasWindowsAttributes(final boolean hasWindowsAttributes) {
- this.hasWindowsAttributes = hasWindowsAttributes;
- }
-
- /**
- * Gets the windows attributes.
- * @return the windows attributes
- */
- public int getWindowsAttributes() {
- return windowsAttributes;
- }
-
- /**
- * Sets the windows attributes.
- * @param windowsAttributes the windows attributes
- */
- public void setWindowsAttributes(final int windowsAttributes) {
- this.windowsAttributes = windowsAttributes;
- }
-
- /**
- * Returns whether this entry has got a crc.
- *
- * <p>In general entries without streams don't have a CRC either.
- * @return whether this entry has got a crc.
- */
- public boolean getHasCrc() {
- return hasCrc;
- }
-
- /**
- * Sets whether this entry has got a crc.
- * @param hasCrc whether this entry has got a crc.
- */
- public void setHasCrc(final boolean hasCrc) {
- this.hasCrc = hasCrc;
- }
-
- /**
- * Gets the CRC.
- * @deprecated use getCrcValue instead.
- * @return the CRC
- */
- @Deprecated
- public int getCrc() {
- return (int) crc;
- }
-
- /**
- * Sets the CRC.
- * @deprecated use setCrcValue instead.
- * @param crc the CRC
- */
- @Deprecated
- public void setCrc(final int crc) {
- this.crc = crc;
- }
-
- /**
- * Gets the CRC.
- * @since Compress 1.7
- * @return the CRC
- */
- public long getCrcValue() {
- return crc;
- }
-
- /**
- * Sets the CRC.
- * @since Compress 1.7
- * @param crc the CRC
- */
- public void setCrcValue(final long crc) {
- this.crc = crc;
- }
-
- /**
- * Gets the compressed CRC.
- * @deprecated use getCompressedCrcValue instead.
- * @return the compressed CRC
- */
- @Deprecated
- int getCompressedCrc() {
- return (int) compressedCrc;
- }
-
- /**
- * Sets the compressed CRC.
- * @deprecated use setCompressedCrcValue instead.
- * @param crc the CRC
- */
- @Deprecated
- void setCompressedCrc(final int crc) {
- this.compressedCrc = crc;
- }
-
- /**
- * Gets the compressed CRC.
- * @since Compress 1.7
- * @return the CRC
- */
- long getCompressedCrcValue() {
- return compressedCrc;
- }
-
- /**
- * Sets the compressed CRC.
- * @since Compress 1.7
- * @param crc the CRC
- */
- void setCompressedCrcValue(final long crc) {
- this.compressedCrc = crc;
- }
-
- /**
- * Get this entry's file size.
- *
- * @return This entry's file size.
- */
- @Override
- public long getSize() {
- return size;
- }
-
- /**
- * Set this entry's file size.
- *
- * @param size This entry's new file size.
- */
- public void setSize(final long size) {
- this.size = size;
- }
-
- /**
- * Get this entry's compressed file size.
- *
- * @return This entry's compressed file size.
- */
- long getCompressedSize() {
- return compressedSize;
- }
-
- /**
- * Set this entry's compressed file size.
- *
- * @param size This entry's new compressed file size.
- */
- void setCompressedSize(final long size) {
- this.compressedSize = size;
- }
-
- /**
- * Sets the (compression) methods to use for entry's content - the
- * default is LZMA2.
- *
- * <p>Currently only {@link SevenZMethod#COPY}, {@link
- * SevenZMethod#LZMA2}, {@link SevenZMethod#BZIP2} and {@link
- * SevenZMethod#DEFLATE} are supported when writing archives.
- *
- * <p>The methods will be consulted in iteration order to create
- * the final output.
- *
- * @param methods the methods to use for the content
- * @since 1.8
- */
- public void setContentMethods(final Iterable<? extends SevenZMethodConfiguration> methods) {
- if (methods != null) {
- final LinkedList<SevenZMethodConfiguration> l = new LinkedList<>();
- for (final SevenZMethodConfiguration m : methods) {
- l.addLast(m);
- }
- contentMethods = Collections.unmodifiableList(l);
- } else {
- contentMethods = null;
- }
- }
-
- /**
- * Gets the (compression) methods to use for entry's content - the
- * default is LZMA2.
- *
- * <p>Currently only {@link SevenZMethod#COPY}, {@link
- * SevenZMethod#LZMA2}, {@link SevenZMethod#BZIP2} and {@link
- * SevenZMethod#DEFLATE} are supported when writing archives.
- *
- * <p>The methods will be consulted in iteration order to create
- * the final output.
- *
- * @since 1.8
- * @return the methods to use for the content
- */
- public Iterable<? extends SevenZMethodConfiguration> getContentMethods() {
- return contentMethods;
- }
-
- /**
- * Converts NTFS time (100 nanosecond units since 1 January 1601)
- * to Java time.
- * @param ntfsTime the NTFS time in 100 nanosecond units
- * @return the Java time
- */
- public static Date ntfsTimeToJavaTime(final long ntfsTime) {
- final Calendar ntfsEpoch = Calendar.getInstance();
- ntfsEpoch.setTimeZone(TimeZone.getTimeZone("GMT+0"));
- ntfsEpoch.set(1601, 0, 1, 0, 0, 0);
- ntfsEpoch.set(Calendar.MILLISECOND, 0);
- final long realTime = ntfsEpoch.getTimeInMillis() + (ntfsTime / (10*1000));
- return new Date(realTime);
- }
-
- /**
- * Converts Java time to NTFS time.
- * @param date the Java time
- * @return the NTFS time
- */
- public static long javaTimeToNtfsTime(final Date date) {
- final Calendar ntfsEpoch = Calendar.getInstance();
- ntfsEpoch.setTimeZone(TimeZone.getTimeZone("GMT+0"));
- ntfsEpoch.set(1601, 0, 1, 0, 0, 0);
- ntfsEpoch.set(Calendar.MILLISECOND, 0);
- return ((date.getTime() - ntfsEpoch.getTimeInMillis())* 1000 * 10);
- }
-}
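
The two Calendar-based helpers above boil down to a fixed epoch shift: NTFS time counts 100 ns units from 1601-01-01T00:00:00Z, which lies 11,644,473,600 seconds before the Java epoch. An equivalent Calendar-free sketch (the constant and class name are mine, not part of the removed code):

import java.util.Date;

public class NtfsTime {
    // Milliseconds between 1601-01-01T00:00:00Z (NTFS epoch) and 1970-01-01 (Java epoch).
    private static final long EPOCH_OFFSET_MILLIS = 11_644_473_600_000L;

    static Date ntfsToJava(final long ntfsTime) {
        // NTFS counts 100ns units: 10,000 of them per millisecond.
        return new Date(ntfsTime / 10_000 - EPOCH_OFFSET_MILLIS);
    }

    static long javaToNtfs(final Date date) {
        return (date.getTime() + EPOCH_OFFSET_MILLIS) * 10_000;
    }

    public static void main(String[] args) {
        final Date now = new Date();
        System.out.println(ntfsToJava(javaToNtfs(now)).equals(now)); // true
    }
}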
diff --git a/src/org/apache/commons/compress/archivers/sevenz/SevenZFile.java b/src/org/apache/commons/compress/archivers/sevenz/SevenZFile.java
deleted file mode 100644
index 421c34abac6..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/SevenZFile.java
+++ /dev/null
@@ -1,1211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.BufferedInputStream;
-import java.io.ByteArrayInputStream;
-import java.io.Closeable;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.CharBuffer;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.charset.StandardCharsets;
-import java.nio.charset.CharsetEncoder;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.EnumSet;
-import java.util.LinkedList;
-import java.util.zip.CRC32;
-
-import org.apache.commons.compress.utils.BoundedInputStream;
-import org.apache.commons.compress.utils.CRC32VerifyingInputStream;
-import org.apache.commons.compress.utils.CharsetNames;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * Reads a 7z file, using SeekableByteChannel under
- * the covers.
- *
- * The 7z file format is a flexible container
- * that can contain many compression and
- * encryption types, but at the moment only
- * Copy, LZMA, LZMA2, BZIP2, Deflate and AES-256 + SHA-256
- * are supported.
- *
- * The format is very Windows/Intel specific,
- * so it uses little-endian byte order,
- * doesn't store user/group or permission bits,
- * and represents times using NTFS timestamps
- * (100 nanosecond units since 1 January 1601).
- * Hence the official tools recommend against
- * using it for backup purposes on *nix, and
- * recommend .tar.7z or .tar.lzma or .tar.xz
- * instead.
- *
- * Both the header and file contents may be
- * compressed and/or encrypted. With both
- * encrypted, neither file names nor file
- * contents can be read, but the use of
- * encryption isn't plausibly deniable.
- *
- * @NotThreadSafe
- * @since 1.6
- */
-public class SevenZFile implements Closeable {
- static final int SIGNATURE_HEADER_SIZE = 32;
-
- private final String fileName;
- private SeekableByteChannel channel;
- private final Archive archive;
- private int currentEntryIndex = -1;
- private int currentFolderIndex = -1;
- private InputStream currentFolderInputStream = null;
- private byte[] password;
-
- private long compressedBytesReadFromCurrentEntry;
- private long uncompressedBytesReadFromCurrentEntry;
-
- private final ArrayList<InputStream> deferredBlockStreams = new ArrayList<>();
-
- // shared with SevenZOutputFile and tests, neither mutates it
- static final byte[] sevenZSignature = { //NOSONAR
- (byte)'7', (byte)'z', (byte)0xBC, (byte)0xAF, (byte)0x27, (byte)0x1C
- };
-
- /**
- * Reads a file as 7z archive
- *
- * @param filename the file to read
- * @param password optional password if the archive is encrypted
- * @throws IOException if reading the archive fails
- * @since 1.17
- */
- public SevenZFile(final File filename, final char[] password) throws IOException {
- this(Files.newByteChannel(filename.toPath(), EnumSet.of(StandardOpenOption.READ)),
- filename.getAbsolutePath(), utf16Decode(password), true);
- }
-
- /**
- * Reads a file as 7z archive
- *
- * @param filename the file to read
- * @param password optional password if the archive is encrypted -
- * the byte array is supposed to be the UTF16-LE encoded
- * representation of the password.
- * @throws IOException if reading the archive fails
- * @deprecated use the char[]-arg version for the password instead
- */
- public SevenZFile(final File filename, final byte[] password) throws IOException {
- this(Files.newByteChannel(filename.toPath(), EnumSet.of(StandardOpenOption.READ)),
- filename.getAbsolutePath(), password, true);
- }
-
- /**
- * Reads a SeekableByteChannel as 7z archive
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the channel to read
- * @throws IOException if reading the archive fails
- * @since 1.13
- */
- public SevenZFile(final SeekableByteChannel channel) throws IOException {
- this(channel, "unknown archive", (char[]) null);
- }
-
- /**
- * Reads a SeekableByteChannel as 7z archive
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the channel to read
- * @param password optional password if the archive is encrypted
- * @throws IOException if reading the archive fails
- * @since 1.17
- */
- public SevenZFile(final SeekableByteChannel channel,
- final char[] password) throws IOException {
- this(channel, "unknown archive", utf16Decode(password));
- }
-
- /**
- * Reads a SeekableByteChannel as 7z archive
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the channel to read
- * @param filename name of the archive - only used for error reporting
- * @param password optional password if the archive is encrypted
- * @throws IOException if reading the archive fails
- * @since 1.17
- */
- public SevenZFile(final SeekableByteChannel channel, String filename,
- final char[] password) throws IOException {
- this(channel, filename, utf16Decode(password), false);
- }
-
- /**
- * Reads a SeekableByteChannel as 7z archive
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the channel to read
- * @param filename name of the archive - only used for error reporting
- * @throws IOException if reading the archive fails
- * @since 1.17
- */
- public SevenZFile(final SeekableByteChannel channel, String filename)
- throws IOException {
- this(channel, filename, null, false);
- }
-
- /**
- * Reads a SeekableByteChannel as 7z archive
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the channel to read
- * @param password optional password if the archive is encrypted -
- * the byte array is supposed to be the UTF16-LE encoded
- * representation of the password.
- * @throws IOException if reading the archive fails
- * @since 1.13
- * @deprecated use the char[]-arg version for the password instead
- */
- public SevenZFile(final SeekableByteChannel channel,
- final byte[] password) throws IOException {
- this(channel, "unknown archive", password);
- }
-
- /**
- * Reads a SeekableByteChannel as 7z archive
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the channel to read
- * @param filename name of the archive - only used for error reporting
- * @param password optional password if the archive is encrypted -
- * the byte array is supposed to be the UTF16-LE encoded
- * representation of the password.
- * @throws IOException if reading the archive fails
- * @since 1.13
- * @deprecated use the char[]-arg version for the password instead
- */
- public SevenZFile(final SeekableByteChannel channel, String filename,
- final byte[] password) throws IOException {
- this(channel, filename, password, false);
- }
-
- private SevenZFile(final SeekableByteChannel channel, String filename,
- final byte[] password, boolean closeOnError) throws IOException {
- boolean succeeded = false;
- this.channel = channel;
- this.fileName = filename;
- try {
- archive = readHeaders(password);
- if (password != null) {
- this.password = Arrays.copyOf(password, password.length);
- } else {
- this.password = null;
- }
- succeeded = true;
- } finally {
- if (!succeeded && closeOnError) {
- this.channel.close();
- }
- }
- }
-
- /**
- * Reads a file as unencrypted 7z archive
- *
- * @param filename the file to read
- * @throws IOException if reading the archive fails
- */
- public SevenZFile(final File filename) throws IOException {
- this(filename, (char[]) null);
- }
-
- /**
- * Closes the archive.
- * @throws IOException if closing the file fails
- */
- @Override
- public void close() throws IOException {
- if (channel != null) {
- try {
- channel.close();
- } finally {
- channel = null;
- if (password != null) {
- Arrays.fill(password, (byte) 0);
- }
- password = null;
- }
- }
- }
-
- /**
- * Returns the next Archive Entry in this archive.
- *
- * @return the next entry,
- * or {@code null} if there are no more entries
- * @throws IOException if the next entry could not be read
- */
- public SevenZArchiveEntry getNextEntry() throws IOException {
- if (currentEntryIndex >= archive.files.length - 1) {
- return null;
- }
- ++currentEntryIndex;
- final SevenZArchiveEntry entry = archive.files[currentEntryIndex];
- buildDecodingStream();
- uncompressedBytesReadFromCurrentEntry = compressedBytesReadFromCurrentEntry = 0;
- return entry;
- }
-
- /**
- * Returns meta-data of all archive entries.
- *
- * <p>This method only provides meta-data, the entries can not be
- * used to read the contents, you still need to process all
- * entries in order using {@link #getNextEntry} for that.
- *
- * <p>The content methods are only available for entries that have
- * already been reached via {@link #getNextEntry}.
- *
- * @return meta-data of all archive entries.
- * @since 1.11
- */
- public Iterable<SevenZArchiveEntry> getEntries() {
- return Arrays.asList(archive.files);
- }
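
Between getNextEntry() and getEntries() above, the intended consumption pattern for this class is a plain read loop over the entries. A minimal usage sketch against the public commons-compress API, listing entry names and sizes ("archive.7z" is a placeholder path, not a file shipped with JOSM):

import java.io.File;
import java.io.IOException;

import org.apache.commons.compress.archivers.sevenz.SevenZArchiveEntry;
import org.apache.commons.compress.archivers.sevenz.SevenZFile;

public class ListSevenZ {
    public static void main(String[] args) throws IOException {
        try (SevenZFile sevenZ = new SevenZFile(new File("archive.7z"))) {
            SevenZArchiveEntry entry;
            while ((entry = sevenZ.getNextEntry()) != null) {
                System.out.println(entry.getName() + " (" + entry.getSize() + " bytes)");
            }
        }
    }
}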
-
- private Archive readHeaders(final byte[] password) throws IOException {
- ByteBuffer buf = ByteBuffer.allocate(12 /* signature + 2 bytes version + 4 bytes CRC */)
- .order(ByteOrder.LITTLE_ENDIAN);
- readFully(buf);
- final byte[] signature = new byte[6];
- buf.get(signature);
- if (!Arrays.equals(signature, sevenZSignature)) {
- throw new IOException("Bad 7z signature");
- }
- // 7zFormat.txt has it wrong - it's first major then minor
- final byte archiveVersionMajor = buf.get();
- final byte archiveVersionMinor = buf.get();
- if (archiveVersionMajor != 0) {
- throw new IOException(String.format("Unsupported 7z version (%d,%d)",
- archiveVersionMajor, archiveVersionMinor));
- }
-
- final long startHeaderCrc = 0xffffFFFFL & buf.getInt();
- final StartHeader startHeader = readStartHeader(startHeaderCrc);
-
- final int nextHeaderSizeInt = (int) startHeader.nextHeaderSize;
- if (nextHeaderSizeInt != startHeader.nextHeaderSize) {
- throw new IOException("cannot handle nextHeaderSize " + startHeader.nextHeaderSize);
- }
- channel.position(SIGNATURE_HEADER_SIZE + startHeader.nextHeaderOffset);
- buf = ByteBuffer.allocate(nextHeaderSizeInt).order(ByteOrder.LITTLE_ENDIAN);
- readFully(buf);
- final CRC32 crc = new CRC32();
- crc.update(buf.array());
- if (startHeader.nextHeaderCrc != crc.getValue()) {
- throw new IOException("NextHeader CRC mismatch");
- }
-
- Archive archive = new Archive();
- int nid = getUnsignedByte(buf);
- if (nid == NID.kEncodedHeader) {
- buf = readEncodedHeader(buf, archive, password);
- // Archive gets rebuilt with the new header
- archive = new Archive();
- nid = getUnsignedByte(buf);
- }
- if (nid == NID.kHeader) {
- readHeader(buf, archive);
- } else {
- throw new IOException("Broken or unsupported archive: no Header");
- }
- return archive;
- }
-
- private StartHeader readStartHeader(final long startHeaderCrc) throws IOException {
- final StartHeader startHeader = new StartHeader();
- // using Stream rather than ByteBuffer for the benefit of the
- // built-in CRC check
- try (DataInputStream dataInputStream = new DataInputStream(new CRC32VerifyingInputStream(
- new BoundedSeekableByteChannelInputStream(channel, 20), 20, startHeaderCrc))) {
- startHeader.nextHeaderOffset = Long.reverseBytes(dataInputStream.readLong());
- startHeader.nextHeaderSize = Long.reverseBytes(dataInputStream.readLong());
- startHeader.nextHeaderCrc = 0xffffFFFFL & Integer.reverseBytes(dataInputStream.readInt());
- return startHeader;
- }
- }
-
- private void readHeader(final ByteBuffer header, final Archive archive) throws IOException {
- int nid = getUnsignedByte(header);
-
- if (nid == NID.kArchiveProperties) {
- readArchiveProperties(header);
- nid = getUnsignedByte(header);
- }
-
- if (nid == NID.kAdditionalStreamsInfo) {
- throw new IOException("Additional streams unsupported");
- //nid = header.readUnsignedByte();
- }
-
- if (nid == NID.kMainStreamsInfo) {
- readStreamsInfo(header, archive);
- nid = getUnsignedByte(header);
- }
-
- if (nid == NID.kFilesInfo) {
- readFilesInfo(header, archive);
- nid = getUnsignedByte(header);
- }
-
- if (nid != NID.kEnd) {
- throw new IOException("Badly terminated header, found " + nid);
- }
- }
-
- private void readArchiveProperties(final ByteBuffer input) throws IOException {
- // FIXME: the reference implementation just throws them away?
- int nid = getUnsignedByte(input);
- while (nid != NID.kEnd) {
- final long propertySize = readUint64(input);
- final byte[] property = new byte[(int)propertySize];
- input.get(property);
- nid = getUnsignedByte(input);
- }
- }
-
- private ByteBuffer readEncodedHeader(final ByteBuffer header, final Archive archive,
- final byte[] password) throws IOException {
- readStreamsInfo(header, archive);
-
- // FIXME: merge with buildDecodingStream()/buildDecoderStack() at some stage?
- final Folder folder = archive.folders[0];
- final int firstPackStreamIndex = 0;
- final long folderOffset = SIGNATURE_HEADER_SIZE + archive.packPos +
- 0;
-
- channel.position(folderOffset);
- InputStream inputStreamStack = new BoundedSeekableByteChannelInputStream(channel,
- archive.packSizes[firstPackStreamIndex]);
- for (final Coder coder : folder.getOrderedCoders()) {
- if (coder.numInStreams != 1 || coder.numOutStreams != 1) {
- throw new IOException("Multi input/output stream coders are not yet supported");
- }
- inputStreamStack = Coders.addDecoder(fileName, inputStreamStack, //NOSONAR
- folder.getUnpackSizeForCoder(coder), coder, password);
- }
- if (folder.hasCrc) {
- inputStreamStack = new CRC32VerifyingInputStream(inputStreamStack,
- folder.getUnpackSize(), folder.crc);
- }
- final byte[] nextHeader = new byte[(int)folder.getUnpackSize()];
- try (DataInputStream nextHeaderInputStream = new DataInputStream(inputStreamStack)) {
- nextHeaderInputStream.readFully(nextHeader);
- }
- return ByteBuffer.wrap(nextHeader).order(ByteOrder.LITTLE_ENDIAN);
- }
-
- private void readStreamsInfo(final ByteBuffer header, final Archive archive) throws IOException {
- int nid = getUnsignedByte(header);
-
- if (nid == NID.kPackInfo) {
- readPackInfo(header, archive);
- nid = getUnsignedByte(header);
- }
-
- if (nid == NID.kUnpackInfo) {
- readUnpackInfo(header, archive);
- nid = getUnsignedByte(header);
- } else {
- // archive without unpack/coders info
- archive.folders = new Folder[0];
- }
-
- if (nid == NID.kSubStreamsInfo) {
- readSubStreamsInfo(header, archive);
- nid = getUnsignedByte(header);
- }
-
- if (nid != NID.kEnd) {
- throw new IOException("Badly terminated StreamsInfo");
- }
- }
-
- private void readPackInfo(final ByteBuffer header, final Archive archive) throws IOException {
- archive.packPos = readUint64(header);
- final long numPackStreams = readUint64(header);
- int nid = getUnsignedByte(header);
- if (nid == NID.kSize) {
- archive.packSizes = new long[(int)numPackStreams];
- for (int i = 0; i < archive.packSizes.length; i++) {
- archive.packSizes[i] = readUint64(header);
- }
- nid = getUnsignedByte(header);
- }
-
- if (nid == NID.kCRC) {
- archive.packCrcsDefined = readAllOrBits(header, (int)numPackStreams);
- archive.packCrcs = new long[(int)numPackStreams];
- for (int i = 0; i < (int)numPackStreams; i++) {
- if (archive.packCrcsDefined.get(i)) {
- archive.packCrcs[i] = 0xffffFFFFL & header.getInt();
- }
- }
-
- nid = getUnsignedByte(header);
- }
-
- if (nid != NID.kEnd) {
- throw new IOException("Badly terminated PackInfo (" + nid + ")");
- }
- }
-
- private void readUnpackInfo(final ByteBuffer header, final Archive archive) throws IOException {
- int nid = getUnsignedByte(header);
- if (nid != NID.kFolder) {
- throw new IOException("Expected kFolder, got " + nid);
- }
- final long numFolders = readUint64(header);
- final Folder[] folders = new Folder[(int)numFolders];
- archive.folders = folders;
- final int external = getUnsignedByte(header);
- if (external != 0) {
- throw new IOException("External unsupported");
- }
- for (int i = 0; i < (int)numFolders; i++) {
- folders[i] = readFolder(header);
- }
-
- nid = getUnsignedByte(header);
- if (nid != NID.kCodersUnpackSize) {
- throw new IOException("Expected kCodersUnpackSize, got " + nid);
- }
- for (final Folder folder : folders) {
- folder.unpackSizes = new long[(int)folder.totalOutputStreams];
- for (int i = 0; i < folder.totalOutputStreams; i++) {
- folder.unpackSizes[i] = readUint64(header);
- }
- }
-
- nid = getUnsignedByte(header);
- if (nid == NID.kCRC) {
- final BitSet crcsDefined = readAllOrBits(header, (int)numFolders);
- for (int i = 0; i < (int)numFolders; i++) {
- if (crcsDefined.get(i)) {
- folders[i].hasCrc = true;
- folders[i].crc = 0xffffFFFFL & header.getInt();
- } else {
- folders[i].hasCrc = false;
- }
- }
-
- nid = getUnsignedByte(header);
- }
-
- if (nid != NID.kEnd) {
- throw new IOException("Badly terminated UnpackInfo");
- }
- }
-
- private void readSubStreamsInfo(final ByteBuffer header, final Archive archive) throws IOException {
- for (final Folder folder : archive.folders) {
- folder.numUnpackSubStreams = 1;
- }
- int totalUnpackStreams = archive.folders.length;
-
- int nid = getUnsignedByte(header);
- if (nid == NID.kNumUnpackStream) {
- totalUnpackStreams = 0;
- for (final Folder folder : archive.folders) {
- final long numStreams = readUint64(header);
- folder.numUnpackSubStreams = (int)numStreams;
- totalUnpackStreams += numStreams;
- }
- nid = getUnsignedByte(header);
- }
-
- final SubStreamsInfo subStreamsInfo = new SubStreamsInfo();
- subStreamsInfo.unpackSizes = new long[totalUnpackStreams];
- subStreamsInfo.hasCrc = new BitSet(totalUnpackStreams);
- subStreamsInfo.crcs = new long[totalUnpackStreams];
-
- int nextUnpackStream = 0;
- for (final Folder folder : archive.folders) {
- if (folder.numUnpackSubStreams == 0) {
- continue;
- }
- long sum = 0;
- if (nid == NID.kSize) {
- for (int i = 0; i < folder.numUnpackSubStreams - 1; i++) {
- final long size = readUint64(header);
- subStreamsInfo.unpackSizes[nextUnpackStream++] = size;
- sum += size;
- }
- }
- subStreamsInfo.unpackSizes[nextUnpackStream++] = folder.getUnpackSize() - sum;
- }
- if (nid == NID.kSize) {
- nid = getUnsignedByte(header);
- }
-
- int numDigests = 0;
- for (final Folder folder : archive.folders) {
- if (folder.numUnpackSubStreams != 1 || !folder.hasCrc) {
- numDigests += folder.numUnpackSubStreams;
- }
- }
-
- if (nid == NID.kCRC) {
- final BitSet hasMissingCrc = readAllOrBits(header, numDigests);
- final long[] missingCrcs = new long[numDigests];
- for (int i = 0; i < numDigests; i++) {
- if (hasMissingCrc.get(i)) {
- missingCrcs[i] = 0xffffFFFFL & header.getInt();
- }
- }
- int nextCrc = 0;
- int nextMissingCrc = 0;
- for (final Folder folder: archive.folders) {
- if (folder.numUnpackSubStreams == 1 && folder.hasCrc) {
- subStreamsInfo.hasCrc.set(nextCrc, true);
- subStreamsInfo.crcs[nextCrc] = folder.crc;
- ++nextCrc;
- } else {
- for (int i = 0; i < folder.numUnpackSubStreams; i++) {
- subStreamsInfo.hasCrc.set(nextCrc, hasMissingCrc.get(nextMissingCrc));
- subStreamsInfo.crcs[nextCrc] = missingCrcs[nextMissingCrc];
- ++nextCrc;
- ++nextMissingCrc;
- }
- }
- }
-
- nid = getUnsignedByte(header);
- }
-
- if (nid != NID.kEnd) {
- throw new IOException("Badly terminated SubStreamsInfo");
- }
-
- archive.subStreamsInfo = subStreamsInfo;
- }
-
- private Folder readFolder(final ByteBuffer header) throws IOException {
- final Folder folder = new Folder();
-
- final long numCoders = readUint64(header);
- final Coder[] coders = new Coder[(int)numCoders];
- long totalInStreams = 0;
- long totalOutStreams = 0;
- for (int i = 0; i < coders.length; i++) {
- coders[i] = new Coder();
- final int bits = getUnsignedByte(header);
- final int idSize = bits & 0xf;
- final boolean isSimple = (bits & 0x10) == 0;
- final boolean hasAttributes = (bits & 0x20) != 0;
- final boolean moreAlternativeMethods = (bits & 0x80) != 0;
-
- coders[i].decompressionMethodId = new byte[idSize];
- header.get(coders[i].decompressionMethodId);
- if (isSimple) {
- coders[i].numInStreams = 1;
- coders[i].numOutStreams = 1;
- } else {
- coders[i].numInStreams = readUint64(header);
- coders[i].numOutStreams = readUint64(header);
- }
- totalInStreams += coders[i].numInStreams;
- totalOutStreams += coders[i].numOutStreams;
- if (hasAttributes) {
- final long propertiesSize = readUint64(header);
- coders[i].properties = new byte[(int)propertiesSize];
- header.get(coders[i].properties);
- }
- // would need to keep looping as above:
- while (moreAlternativeMethods) {
- throw new IOException("Alternative methods are unsupported, please report. " +
- "The reference implementation doesn't support them either.");
- }
- }
- folder.coders = coders;
- folder.totalInputStreams = totalInStreams;
- folder.totalOutputStreams = totalOutStreams;
-
- if (totalOutStreams == 0) {
- throw new IOException("Total output streams can't be 0");
- }
- final long numBindPairs = totalOutStreams - 1;
- final BindPair[] bindPairs = new BindPair[(int)numBindPairs];
- for (int i = 0; i < bindPairs.length; i++) {
- bindPairs[i] = new BindPair();
- bindPairs[i].inIndex = readUint64(header);
- bindPairs[i].outIndex = readUint64(header);
- }
- folder.bindPairs = bindPairs;
-
- if (totalInStreams < numBindPairs) {
- throw new IOException("Total input streams can't be less than the number of bind pairs");
- }
- final long numPackedStreams = totalInStreams - numBindPairs;
- final long[] packedStreams = new long[(int)numPackedStreams];
- if (numPackedStreams == 1) {
- int i;
- for (i = 0; i < (int)totalInStreams; i++) {
- if (folder.findBindPairForInStream(i) < 0) {
- break;
- }
- }
- if (i == (int)totalInStreams) {
- throw new IOException("Couldn't find stream's bind pair index");
- }
- packedStreams[0] = i;
- } else {
- for (int i = 0; i < (int)numPackedStreams; i++) {
- packedStreams[i] = readUint64(header);
- }
- }
- folder.packedStreams = packedStreams;
-
- return folder;
- }
-
- private BitSet readAllOrBits(final ByteBuffer header, final int size) throws IOException {
- final int areAllDefined = getUnsignedByte(header);
- final BitSet bits;
- if (areAllDefined != 0) {
- bits = new BitSet(size);
- for (int i = 0; i < size; i++) {
- bits.set(i, true);
- }
- } else {
- bits = readBits(header, size);
- }
- return bits;
- }
-
- private BitSet readBits(final ByteBuffer header, final int size) throws IOException {
- final BitSet bits = new BitSet(size);
- int mask = 0;
- int cache = 0;
- for (int i = 0; i < size; i++) {
- if (mask == 0) {
- mask = 0x80;
- cache = getUnsignedByte(header);
- }
- bits.set(i, (cache & mask) != 0);
- mask >>>= 1;
- }
- return bits;
- }
-
- private void readFilesInfo(final ByteBuffer header, final Archive archive) throws IOException {
- final long numFiles = readUint64(header);
- final SevenZArchiveEntry[] files = new SevenZArchiveEntry[(int)numFiles];
- for (int i = 0; i < files.length; i++) {
- files[i] = new SevenZArchiveEntry();
- }
- BitSet isEmptyStream = null;
- BitSet isEmptyFile = null;
- BitSet isAnti = null;
- while (true) {
- final int propertyType = getUnsignedByte(header);
- if (propertyType == 0) {
- break;
- }
- final long size = readUint64(header);
- switch (propertyType) {
- case NID.kEmptyStream: {
- isEmptyStream = readBits(header, files.length);
- break;
- }
- case NID.kEmptyFile: {
- if (isEmptyStream == null) { // protect against NPE
- throw new IOException("Header format error: kEmptyStream must appear before kEmptyFile");
- }
- isEmptyFile = readBits(header, isEmptyStream.cardinality());
- break;
- }
- case NID.kAnti: {
- if (isEmptyStream == null) { // protect against NPE
- throw new IOException("Header format error: kEmptyStream must appear before kAnti");
- }
- isAnti = readBits(header, isEmptyStream.cardinality());
- break;
- }
- case NID.kName: {
- final int external = getUnsignedByte(header);
- if (external != 0) {
- throw new IOException("Not implemented");
- }
- if (((size - 1) & 1) != 0) {
- throw new IOException("File names length invalid");
- }
- final byte[] names = new byte[(int)(size - 1)];
- header.get(names);
- int nextFile = 0;
- int nextName = 0;
- for (int i = 0; i < names.length; i += 2) {
- if (names[i] == 0 && names[i+1] == 0) {
- files[nextFile++].setName(new String(names, nextName, i-nextName, CharsetNames.UTF_16LE));
- nextName = i + 2;
- }
- }
- if (nextName != names.length || nextFile != files.length) {
- throw new IOException("Error parsing file names");
- }
- break;
- }
- case NID.kCTime: {
- final BitSet timesDefined = readAllOrBits(header, files.length);
- final int external = getUnsignedByte(header);
- if (external != 0) {
- throw new IOException("Unimplemented");
- }
- for (int i = 0; i < files.length; i++) {
- files[i].setHasCreationDate(timesDefined.get(i));
- if (files[i].getHasCreationDate()) {
- files[i].setCreationDate(header.getLong());
- }
- }
- break;
- }
- case NID.kATime: {
- final BitSet timesDefined = readAllOrBits(header, files.length);
- final int external = getUnsignedByte(header);
- if (external != 0) {
- throw new IOException("Unimplemented");
- }
- for (int i = 0; i < files.length; i++) {
- files[i].setHasAccessDate(timesDefined.get(i));
- if (files[i].getHasAccessDate()) {
- files[i].setAccessDate(header.getLong());
- }
- }
- break;
- }
- case NID.kMTime: {
- final BitSet timesDefined = readAllOrBits(header, files.length);
- final int external = getUnsignedByte(header);
- if (external != 0) {
- throw new IOException("Unimplemented");
- }
- for (int i = 0; i < files.length; i++) {
- files[i].setHasLastModifiedDate(timesDefined.get(i));
- if (files[i].getHasLastModifiedDate()) {
- files[i].setLastModifiedDate(header.getLong());
- }
- }
- break;
- }
- case NID.kWinAttributes: {
- final BitSet attributesDefined = readAllOrBits(header, files.length);
- final int external = getUnsignedByte(header);
- if (external != 0) {
- throw new IOException("Unimplemented");
- }
- for (int i = 0; i < files.length; i++) {
- files[i].setHasWindowsAttributes(attributesDefined.get(i));
- if (files[i].getHasWindowsAttributes()) {
- files[i].setWindowsAttributes(header.getInt());
- }
- }
- break;
- }
- case NID.kStartPos: {
- throw new IOException("kStartPos is unsupported, please report");
- }
- case NID.kDummy: {
- // 7z 9.20 asserts the content is all zeros and ignores the property
- // Compress up to 1.8.1 would throw an exception, now we ignore it (see COMPRESS-287)
-
- if (skipBytesFully(header, size) < size) {
- throw new IOException("Incomplete kDummy property");
- }
- break;
- }
-
- default: {
- // Compress up to 1.8.1 would throw an exception, now we ignore it (see COMPRESS-287)
- if (skipBytesFully(header, size) < size) {
- throw new IOException("Incomplete property of type " + propertyType);
- }
- break;
- }
- }
- }
- int nonEmptyFileCounter = 0;
- int emptyFileCounter = 0;
- for (int i = 0; i < files.length; i++) {
- files[i].setHasStream(isEmptyStream == null || !isEmptyStream.get(i));
- if (files[i].hasStream()) {
- files[i].setDirectory(false);
- files[i].setAntiItem(false);
- files[i].setHasCrc(archive.subStreamsInfo.hasCrc.get(nonEmptyFileCounter));
- files[i].setCrcValue(archive.subStreamsInfo.crcs[nonEmptyFileCounter]);
- files[i].setSize(archive.subStreamsInfo.unpackSizes[nonEmptyFileCounter]);
- ++nonEmptyFileCounter;
- } else {
- files[i].setDirectory(isEmptyFile == null || !isEmptyFile.get(emptyFileCounter));
- files[i].setAntiItem(isAnti != null && isAnti.get(emptyFileCounter));
- files[i].setHasCrc(false);
- files[i].setSize(0);
- ++emptyFileCounter;
- }
- }
- archive.files = files;
- calculateStreamMap(archive);
- }
-
- private void calculateStreamMap(final Archive archive) throws IOException {
- final StreamMap streamMap = new StreamMap();
-
- int nextFolderPackStreamIndex = 0;
- final int numFolders = archive.folders != null ? archive.folders.length : 0;
- streamMap.folderFirstPackStreamIndex = new int[numFolders];
- for (int i = 0; i < numFolders; i++) {
- streamMap.folderFirstPackStreamIndex[i] = nextFolderPackStreamIndex;
- nextFolderPackStreamIndex += archive.folders[i].packedStreams.length;
- }
-
- long nextPackStreamOffset = 0;
- final int numPackSizes = archive.packSizes != null ? archive.packSizes.length : 0;
- streamMap.packStreamOffsets = new long[numPackSizes];
- for (int i = 0; i < numPackSizes; i++) {
- streamMap.packStreamOffsets[i] = nextPackStreamOffset;
- nextPackStreamOffset += archive.packSizes[i];
- }
-
- streamMap.folderFirstFileIndex = new int[numFolders];
- streamMap.fileFolderIndex = new int[archive.files.length];
- int nextFolderIndex = 0;
- int nextFolderUnpackStreamIndex = 0;
- for (int i = 0; i < archive.files.length; i++) {
- if (!archive.files[i].hasStream() && nextFolderUnpackStreamIndex == 0) {
- streamMap.fileFolderIndex[i] = -1;
- continue;
- }
- if (nextFolderUnpackStreamIndex == 0) {
- for (; nextFolderIndex < archive.folders.length; ++nextFolderIndex) {
- streamMap.folderFirstFileIndex[nextFolderIndex] = i;
- if (archive.folders[nextFolderIndex].numUnpackSubStreams > 0) {
- break;
- }
- }
- if (nextFolderIndex >= archive.folders.length) {
- throw new IOException("Too few folders in archive");
- }
- }
- streamMap.fileFolderIndex[i] = nextFolderIndex;
- if (!archive.files[i].hasStream()) {
- continue;
- }
- ++nextFolderUnpackStreamIndex;
- if (nextFolderUnpackStreamIndex >= archive.folders[nextFolderIndex].numUnpackSubStreams) {
- ++nextFolderIndex;
- nextFolderUnpackStreamIndex = 0;
- }
- }
-
- archive.streamMap = streamMap;
- }
-
- private void buildDecodingStream() throws IOException {
- final int folderIndex = archive.streamMap.fileFolderIndex[currentEntryIndex];
- if (folderIndex < 0) {
- deferredBlockStreams.clear();
- // TODO: previously it'd return an empty stream?
- // new BoundedInputStream(new ByteArrayInputStream(new byte[0]), 0);
- return;
- }
- final SevenZArchiveEntry file = archive.files[currentEntryIndex];
- if (currentFolderIndex == folderIndex) {
- // The current entry is within the same (potentially already opened) folder (COMPRESS-320).
- // The previous stream has to be fully decoded before we can start reading this one,
- // but we don't do it eagerly -- if the user skips over the entire folder,
- // nothing is decompressed at all.
-
- file.setContentMethods(archive.files[currentEntryIndex - 1].getContentMethods());
- } else {
- // We're opening a new folder. Discard any queued entry streams and the current folder stream.
- currentFolderIndex = folderIndex;
- deferredBlockStreams.clear();
- if (currentFolderInputStream != null) {
- currentFolderInputStream.close();
- currentFolderInputStream = null;
- }
-
- final Folder folder = archive.folders[folderIndex];
- final int firstPackStreamIndex = archive.streamMap.folderFirstPackStreamIndex[folderIndex];
- final long folderOffset = SIGNATURE_HEADER_SIZE + archive.packPos +
- archive.streamMap.packStreamOffsets[firstPackStreamIndex];
- currentFolderInputStream = buildDecoderStack(folder, folderOffset, firstPackStreamIndex, file);
- }
-
- InputStream fileStream = new BoundedInputStream(currentFolderInputStream, file.getSize());
- if (file.getHasCrc()) {
- fileStream = new CRC32VerifyingInputStream(fileStream, file.getSize(), file.getCrcValue());
- }
-
- deferredBlockStreams.add(fileStream);
- }
-
- private InputStream buildDecoderStack(final Folder folder, final long folderOffset,
- final int firstPackStreamIndex, final SevenZArchiveEntry entry) throws IOException {
- channel.position(folderOffset);
- InputStream inputStreamStack = new FilterInputStream(new BufferedInputStream(
- new BoundedSeekableByteChannelInputStream(channel,
- archive.packSizes[firstPackStreamIndex]))) {
- @Override
- public int read() throws IOException {
- final int r = in.read();
- if (r >= 0) {
- count(1);
- }
- return r;
- }
- @Override
- public int read(final byte[] b) throws IOException {
- return read(b, 0, b.length);
- }
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- final int r = in.read(b, off, len);
- if (r >= 0) {
- count(r);
- }
- return r;
- }
- private void count(int c) {
- compressedBytesReadFromCurrentEntry += c;
- }
- };
- final LinkedList<SevenZMethodConfiguration> methods = new LinkedList<>();
- for (final Coder coder : folder.getOrderedCoders()) {
- if (coder.numInStreams != 1 || coder.numOutStreams != 1) {
- throw new IOException("Multi input/output stream coders are not yet supported");
- }
- final SevenZMethod method = SevenZMethod.byId(coder.decompressionMethodId);
- inputStreamStack = Coders.addDecoder(fileName, inputStreamStack,
- folder.getUnpackSizeForCoder(coder), coder, password);
- methods.addFirst(new SevenZMethodConfiguration(method,
- Coders.findByMethod(method).getOptionsFromCoder(coder, inputStreamStack)));
- }
- entry.setContentMethods(methods);
- if (folder.hasCrc) {
- return new CRC32VerifyingInputStream(inputStreamStack,
- folder.getUnpackSize(), folder.crc);
- }
- return inputStreamStack;
- }
-
- /**
- * Reads a byte of data.
- *
- * @return the byte read, or -1 if end of input is reached
- * @throws IOException
- * if an I/O error has occurred
- */
- public int read() throws IOException {
- int b = getCurrentStream().read();
- if (b >= 0) {
- uncompressedBytesReadFromCurrentEntry++;
- }
- return b;
- }
-
- private InputStream getCurrentStream() throws IOException {
- if (archive.files[currentEntryIndex].getSize() == 0) {
- return new ByteArrayInputStream(new byte[0]);
- }
- if (deferredBlockStreams.isEmpty()) {
- throw new IllegalStateException("No current 7z entry (call getNextEntry() first).");
- }
-
- while (deferredBlockStreams.size() > 1) {
- // In solid compression mode we need to decompress all leading folders'
- // streams to get access to an entry. We defer this until it is really needed
- // so that entire blocks can be skipped without wasting time on decompression.
- try (final InputStream stream = deferredBlockStreams.remove(0)) {
- IOUtils.skip(stream, Long.MAX_VALUE);
- }
- compressedBytesReadFromCurrentEntry = 0;
- }
-
- return deferredBlockStreams.get(0);
- }
-
- /**
- * Reads data into an array of bytes.
- *
- * @param b the array to write data to
- * @return the number of bytes read, or -1 if end of input is reached
- * @throws IOException
- * if an I/O error has occurred
- */
- public int read(final byte[] b) throws IOException {
- return read(b, 0, b.length);
- }
-
- /**
- * Reads data into an array of bytes.
- *
- * @param b the array to write data to
- * @param off offset into the buffer to start filling at
- * @param len the maximum number of bytes to read
- * @return the number of bytes read, or -1 if end of input is reached
- * @throws IOException
- * if an I/O error has occurred
- */
- public int read(final byte[] b, final int off, final int len) throws IOException {
- int cnt = getCurrentStream().read(b, off, len);
- if (cnt > 0) {
- uncompressedBytesReadFromCurrentEntry += cnt;
- }
- return cnt;
- }
-
- /**
- * Provides statistics for bytes read from the current entry.
- *
- * @return statistics for bytes read from the current entry
- * @since 1.17
- */
- public InputStreamStatistics getStatisticsForCurrentEntry() {
- return new InputStreamStatistics() {
- @Override
- public long getCompressedCount() {
- return compressedBytesReadFromCurrentEntry;
- }
- @Override
- public long getUncompressedCount() {
- return uncompressedBytesReadFromCurrentEntry;
- }
- };
- }
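
For illustration only (not part of this patch): a minimal sketch of how the per-entry statistics above are meant to be consumed. The class name SevenZStatsDemo and the argv-based path are made up; the Commons Compress calls are the public API used elsewhere in this file.

import java.io.File;
import java.io.IOException;
import org.apache.commons.compress.archivers.sevenz.SevenZArchiveEntry;
import org.apache.commons.compress.archivers.sevenz.SevenZFile;
import org.apache.commons.compress.utils.InputStreamStatistics;

class SevenZStatsDemo {
    public static void main(String[] args) throws IOException {
        try (SevenZFile sevenZ = new SevenZFile(new File(args[0]))) {
            SevenZArchiveEntry entry;
            final byte[] buf = new byte[8192];
            while ((entry = sevenZ.getNextEntry()) != null) {
                while (sevenZ.read(buf) != -1) {
                    // drain the entry so both counters are complete
                }
                final InputStreamStatistics stats = sevenZ.getStatisticsForCurrentEntry();
                System.out.printf("%s: %d compressed, %d uncompressed%n",
                        entry.getName(), stats.getCompressedCount(), stats.getUncompressedCount());
            }
        }
    }
}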
-
- private static long readUint64(final ByteBuffer in) throws IOException {
- // long rather than int as it might get shifted beyond the range of an int
- final long firstByte = getUnsignedByte(in);
- int mask = 0x80;
- long value = 0;
- for (int i = 0; i < 8; i++) {
- if ((firstByte & mask) == 0) {
- return value | ((firstByte & (mask - 1)) << (8 * i));
- }
- final long nextByte = getUnsignedByte(in);
- value |= nextByte << (8 * i);
- mask >>>= 1;
- }
- return value;
- }
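
To make the encoding concrete, here is a standalone sketch of the variable-length scheme readUint64 implements: each set high bit of the first byte adds one little-endian payload byte, and the leftover first-byte bits become the most significant bits. The class name is hypothetical; the decode logic mirrors the method above.

import java.nio.ByteBuffer;

class Uint64DecodeDemo {
    static long decode(ByteBuffer in) {
        final long firstByte = in.get() & 0xff;
        int mask = 0x80;
        long value = 0;
        for (int i = 0; i < 8; i++) {
            if ((firstByte & mask) == 0) {
                return value | ((firstByte & (mask - 1)) << (8 * i));
            }
            final long nextByte = in.get() & 0xff;
            value |= nextByte << (8 * i);
            mask >>>= 1;
        }
        return value;
    }

    public static void main(String[] args) {
        // 0x81: one continuation bit set, so one payload byte (0x40) follows;
        // the leftover first-byte bit 0x01 becomes bits 8..13 -> 0x140 == 320
        System.out.println(decode(ByteBuffer.wrap(new byte[] { (byte) 0x81, 0x40 })));
    }
}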
-
- private static int getUnsignedByte(ByteBuffer buf) {
- return buf.get() & 0xff;
- }
-
- /**
- * Checks if the signature matches what is expected for a 7z file.
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this is the signature of a 7z archive.
- * @since 1.8
- */
- public static boolean matches(final byte[] signature, final int length) {
- if (length < sevenZSignature.length) {
- return false;
- }
-
- for (int i = 0; i < sevenZSignature.length; i++) {
- if (signature[i] != sevenZSignature[i]) {
- return false;
- }
- }
- return true;
- }
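
A small usage sketch for matches(): probe the first six bytes of a file before treating it as a 7z archive. The class name and the argv-based path are placeholders.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.commons.compress.archivers.sevenz.SevenZFile;

class SignatureProbe {
    public static void main(String[] args) throws IOException {
        try (InputStream in = Files.newInputStream(Paths.get(args[0]))) {
            final byte[] buf = new byte[6];
            final int read = in.read(buf); // may be short or -1 for tiny files
            System.out.println("7z archive: " + SevenZFile.matches(buf, Math.max(read, 0)));
        }
    }
}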
-
- private static long skipBytesFully(final ByteBuffer input, long bytesToSkip) throws IOException {
- if (bytesToSkip < 1) {
- return 0;
- }
- int current = input.position();
- int maxSkip = input.remaining();
- if (maxSkip < bytesToSkip) {
- bytesToSkip = maxSkip;
- }
- input.position(current + (int) bytesToSkip);
- return bytesToSkip;
- }
-
- private void readFully(ByteBuffer buf) throws IOException {
- buf.rewind();
- IOUtils.readFully(channel, buf);
- buf.flip();
- }
-
- @Override
- public String toString() {
- return archive.toString();
- }
-
- private static final CharsetEncoder PASSWORD_ENCODER = StandardCharsets.UTF_16LE.newEncoder();
-
- private static byte[] utf16Decode(char[] chars) throws IOException {
- if (chars == null) {
- return null;
- }
- ByteBuffer encoded = PASSWORD_ENCODER.encode(CharBuffer.wrap(chars));
- if (encoded.hasArray()) {
- return encoded.array();
- }
- byte[] e = new byte[encoded.remaining()];
- encoded.get(e);
- return e;
- }
-}
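
An aside on the password handling above (illustrative, assumes nothing beyond the JDK): despite its name, utf16Decode produces the UTF-16LE byte form of the password that 7z's AES key derivation expects. The same bytes can be obtained like this; the class name is made up.

import java.nio.charset.StandardCharsets;

class PasswordBytesDemo {
    public static void main(String[] args) {
        final char[] password = { 's', 'e', 'c', 'r', 'e', 't' };
        // equivalent result to utf16Decode(password): UTF-16LE, two bytes per char
        final byte[] utf16le = new String(password).getBytes(StandardCharsets.UTF_16LE);
        System.out.println(utf16le.length); // 12
    }
}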
diff --git a/src/org/apache/commons/compress/archivers/sevenz/SevenZMethod.java b/src/org/apache/commons/compress/archivers/sevenz/SevenZMethod.java
deleted file mode 100644
index 3c446cc2c1e..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/SevenZMethod.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.util.Arrays;
-
-/**
- * The (partially) supported compression/encryption methods used in 7z archives.
- *
- * <p>All methods with a _FILTER suffix are used as preprocessors with
- * the goal of creating a better compression ratio with the compressor
- * that comes next in the chain of methods. 7z will in general only
- * allow them to be used together with a "real" compression method but
- * Commons Compress doesn't enforce this.
- *
- * <p>The BCJ_ filters work on executable files for the given platform
- * and convert relative addresses to absolute addresses in CALL
- * instructions. This means they are only useful when applied to
- * executables of the chosen platform.
- */
-public enum SevenZMethod {
- /** no compression at all */
- COPY(new byte[] { (byte)0x00 }),
- /** LZMA - only supported when reading */
- LZMA(new byte[] { (byte)0x03, (byte)0x01, (byte)0x01 }),
- /** LZMA2 */
- LZMA2(new byte[] { (byte)0x21 }),
- /** Deflate */
- DEFLATE(new byte[] { (byte)0x04, (byte)0x01, (byte)0x08 }),
- /**
- * Deflate64
- * @since 1.16
- */
- DEFLATE64(new byte[] { (byte)0x04, (byte)0x01, (byte)0x09 }),
- /** BZIP2 */
- BZIP2(new byte[] { (byte)0x04, (byte)0x02, (byte)0x02 }),
- /**
- * AES encryption with a key length of 256 bit using SHA256 for
- * hashes - only supported when reading
- */
- AES256SHA256(new byte[] { (byte)0x06, (byte)0xf1, (byte)0x07, (byte)0x01 }),
- /**
- * BCJ x86 platform version 1.
- * @since 1.8
- */
- BCJ_X86_FILTER(new byte[] { 0x03, 0x03, 0x01, 0x03 }),
- /**
- * BCJ PowerPC platform.
- * @since 1.8
- */
- BCJ_PPC_FILTER(new byte[] { 0x03, 0x03, 0x02, 0x05 }),
- /**
- * BCJ I64 platform.
- * @since 1.8
- */
- BCJ_IA64_FILTER(new byte[] { 0x03, 0x03, 0x04, 0x01 }),
- /**
- * BCJ ARM platform.
- * @since 1.8
- */
- BCJ_ARM_FILTER(new byte[] { 0x03, 0x03, 0x05, 0x01 }),
- /**
- * BCJ ARM Thumb platform.
- * @since 1.8
- */
- BCJ_ARM_THUMB_FILTER(new byte[] { 0x03, 0x03, 0x07, 0x01 }),
- /**
- * BCJ Sparc platform.
- * @since 1.8
- */
- BCJ_SPARC_FILTER(new byte[] { 0x03, 0x03, 0x08, 0x05 }),
- /**
- * Delta filter.
- * @since 1.8
- */
- DELTA_FILTER(new byte[] { 0x03 });
-
- private final byte[] id;
-
- SevenZMethod(final byte[] id) {
- this.id = id;
- }
-
- byte[] getId() {
- final byte[] copy = new byte[id.length];
- System.arraycopy(id, 0, copy, 0, id.length);
- return copy;
- }
-
- static SevenZMethod byId(final byte[] id) {
- for (final SevenZMethod m : SevenZMethod.class.getEnumConstants()) {
- if (Arrays.equals(m.id, id)) {
- return m;
- }
- }
- return null;
- }
-}
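
For illustration: byId() above is a plain linear scan comparing raw id bytes. A self-contained sketch of the same lookup over a hypothetical subset of the ids listed in the enum:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

class MethodIdDemo {
    public static void main(String[] args) {
        final Map<String, byte[]> ids = new LinkedHashMap<>();
        ids.put("COPY", new byte[] { 0x00 });
        ids.put("LZMA2", new byte[] { 0x21 });
        ids.put("BZIP2", new byte[] { 0x04, 0x02, 0x02 });

        final byte[] fromArchive = { 0x21 }; // id bytes read from a folder's coder
        for (final Map.Entry<String, byte[]> e : ids.entrySet()) {
            if (Arrays.equals(e.getValue(), fromArchive)) {
                System.out.println("method: " + e.getKey()); // method: LZMA2
            }
        }
    }
}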
diff --git a/src/org/apache/commons/compress/archivers/sevenz/SevenZMethodConfiguration.java b/src/org/apache/commons/compress/archivers/sevenz/SevenZMethodConfiguration.java
deleted file mode 100644
index bc47ee0a638..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/SevenZMethodConfiguration.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-/**
- * Combines a SevenZMethod with configuration options for the method.
- *
- * <p>The exact type and interpretation of options depend on the
- * method being configured. Currently supported are:
- *
- * <table>
- * <tr><th>Method</th><th>Option Type</th><th>Description</th></tr>
- * <tr><td>BZIP2</td><td>Number</td><td>Block Size - a number between 1 and 9</td></tr>
- * <tr><td>DEFLATE</td><td>Number</td><td>Compression Level - a number between 1 and 9</td></tr>
- * <tr><td>LZMA2</td><td>Number</td><td>Dictionary Size - a number between 4096 and 768 MiB (768 &lt;&lt; 20)</td></tr>
- * <tr><td>LZMA2</td><td>org.tukaani.xz.LZMA2Options</td><td>Whole set of LZMA2 options.</td></tr>
- * <tr><td>DELTA_FILTER</td><td>Number</td><td>Delta Distance - a number between 1 and 256</td></tr>
- * </table>
- *
- * @Immutable
- * @since 1.8
- */
-public class SevenZMethodConfiguration {
- private final SevenZMethod method;
- private final Object options;
-
- /**
- * Doesn't configure any additional options.
- * @param method the method to use
- */
- public SevenZMethodConfiguration(final SevenZMethod method) {
- this(method, null);
- }
-
- /**
- * Specifies a method plus configuration options.
- * @param method the method to use
- * @param options the options to use
- * @throws IllegalArgumentException if the method doesn't understand the options specified.
- */
- public SevenZMethodConfiguration(final SevenZMethod method, final Object options) {
- this.method = method;
- this.options = options;
- if (options != null && !Coders.findByMethod(method).canAcceptOptions(options)) {
- throw new IllegalArgumentException("The " + method + " method doesn't support options of type "
- + options.getClass());
- }
- }
-
- /**
- * The specified method.
- * @return the method
- */
- public SevenZMethod getMethod() {
- return method;
- }
-
- /**
- * The specified options.
- * @return the options
- */
- public Object getOptions() {
- return options;
- }
-
-}
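
A usage sketch matching the option table in the class Javadoc: a delta filter with distance 4 chained before LZMA2 with a 1 MiB dictionary. The class name and the concrete option values are illustrative.

import java.util.Arrays;
import org.apache.commons.compress.archivers.sevenz.SevenZMethod;
import org.apache.commons.compress.archivers.sevenz.SevenZMethodConfiguration;

class MethodConfigDemo {
    public static void main(String[] args) {
        final SevenZMethodConfiguration delta =
                new SevenZMethodConfiguration(SevenZMethod.DELTA_FILTER, 4);  // delta distance 1..256
        final SevenZMethodConfiguration lzma2 =
                new SevenZMethodConfiguration(SevenZMethod.LZMA2, 1 << 20);   // 1 MiB dictionary
        System.out.println(Arrays.asList(delta.getMethod(), lzma2.getMethod()));
    }
}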
diff --git a/src/org/apache/commons/compress/archivers/sevenz/SevenZOutputFile.java b/src/org/apache/commons/compress/archivers/sevenz/SevenZOutputFile.java
deleted file mode 100644
index 2ed21752d4f..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/SevenZOutputFile.java
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.io.ByteArrayOutputStream;
-import java.io.Closeable;
-import java.io.DataOutput;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.zip.CRC32;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.utils.CountingOutputStream;
-
-/**
- * Writes a 7z file.
- * @since 1.6
- */
-public class SevenZOutputFile implements Closeable {
- private final SeekableByteChannel channel;
- private final List<SevenZArchiveEntry> files = new ArrayList<>();
- private int numNonEmptyStreams = 0;
- private final CRC32 crc32 = new CRC32();
- private final CRC32 compressedCrc32 = new CRC32();
- private long fileBytesWritten = 0;
- private boolean finished = false;
- private CountingOutputStream currentOutputStream;
- private CountingOutputStream[] additionalCountingStreams;
- private Iterable<? extends SevenZMethodConfiguration> contentMethods =
- Collections.singletonList(new SevenZMethodConfiguration(SevenZMethod.LZMA2));
- private final Map<SevenZArchiveEntry, long[]> additionalSizes = new HashMap<>();
-
- /**
- * Opens file to write a 7z archive to.
- *
- * @param filename the file to write to
- * @throws IOException if opening the file fails
- */
- public SevenZOutputFile(final File filename) throws IOException {
- this(Files.newByteChannel(filename.toPath(),
- EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE,
- StandardOpenOption.TRUNCATE_EXISTING)));
- }
-
- /**
- * Prepares channel to write a 7z archive to.
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to write to an in-memory archive.
- *
- * @param channel the channel to write to
- * @throws IOException if the channel cannot be positioned properly
- * @since 1.13
- */
- public SevenZOutputFile(final SeekableByteChannel channel) throws IOException {
- this.channel = channel;
- channel.position(SevenZFile.SIGNATURE_HEADER_SIZE);
- }
-
- /**
- * Sets the default compression method to use for entry contents - the
- * default is LZMA2.
- *
- *
- * <p>Currently only {@link SevenZMethod#COPY}, {@link
- * SevenZMethod#LZMA2}, {@link SevenZMethod#BZIP2} and {@link
- * SevenZMethod#DEFLATE} are supported.
- *
- * <p>This is a short form for passing a single-element iterable
- * to {@link #setContentMethods}.
- * @param method the default compression method
- */
- public void setContentCompression(final SevenZMethod method) {
- setContentMethods(Collections.singletonList(new SevenZMethodConfiguration(method)));
- }
-
- /**
- * Sets the default (compression) methods to use for entry contents - the
- * default is LZMA2.
- *
- * <p>Currently only {@link SevenZMethod#COPY}, {@link
- * SevenZMethod#LZMA2}, {@link SevenZMethod#BZIP2} and {@link
- * SevenZMethod#DEFLATE} are supported.
- *
- *
- * <p>The methods will be consulted in iteration order to create
- * the final output.
- *
- * @since 1.8
- * @param methods the default (compression) methods
- */
- public void setContentMethods(final Iterable<? extends SevenZMethodConfiguration> methods) {
- this.contentMethods = reverse(methods);
- }
-
- /**
- * Closes the archive, calling {@link #finish} if necessary.
- *
- * @throws IOException on error
- */
- @Override
- public void close() throws IOException {
- if (!finished) {
- finish();
- }
- channel.close();
- }
-
- /**
- * Create an archive entry using the inputFile and entryName provided.
- *
- * @param inputFile file to create an entry from
- * @param entryName the name to use
- * @return the ArchiveEntry set up with details from the file
- *
- * @throws IOException on error
- */
- public SevenZArchiveEntry createArchiveEntry(final File inputFile,
- final String entryName) throws IOException {
- final SevenZArchiveEntry entry = new SevenZArchiveEntry();
- entry.setDirectory(inputFile.isDirectory());
- entry.setName(entryName);
- entry.setLastModifiedDate(new Date(inputFile.lastModified()));
- return entry;
- }
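
For illustration, a minimal end-to-end sketch of the write API in this class: put an entry, stream its bytes, close the entry, and let close() finish the archive. The archive name and entry name are placeholders.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.commons.compress.archivers.sevenz.SevenZArchiveEntry;
import org.apache.commons.compress.archivers.sevenz.SevenZOutputFile;

class SevenZWriteDemo {
    public static void main(String[] args) throws IOException {
        try (SevenZOutputFile out = new SevenZOutputFile(new File("demo.7z"))) {
            final SevenZArchiveEntry entry = new SevenZArchiveEntry();
            entry.setName("hello.txt");
            out.putArchiveEntry(entry);
            out.write("Hello, 7z!".getBytes(StandardCharsets.UTF_8));
            out.closeArchiveEntry();
        } // close() calls finish(), which writes the headers behind the entry data
    }
}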
-
- /**
- * Records an archive entry to add.
- *
- * The caller must then write the content to the archive and call
- * {@link #closeArchiveEntry()} to complete the process.
- *
- * @param archiveEntry describes the entry
- * @throws IOException on error
- */
- public void putArchiveEntry(final ArchiveEntry archiveEntry) throws IOException {
- final SevenZArchiveEntry entry = (SevenZArchiveEntry) archiveEntry;
- files.add(entry);
- }
-
- /**
- * Closes the archive entry.
- * @throws IOException on error
- */
- public void closeArchiveEntry() throws IOException {
- if (currentOutputStream != null) {
- currentOutputStream.flush();
- currentOutputStream.close();
- }
-
- final SevenZArchiveEntry entry = files.get(files.size() - 1);
- if (fileBytesWritten > 0) { // this implies currentOutputStream != null
- entry.setHasStream(true);
- ++numNonEmptyStreams;
- entry.setSize(currentOutputStream.getBytesWritten()); //NOSONAR
- entry.setCompressedSize(fileBytesWritten);
- entry.setCrcValue(crc32.getValue());
- entry.setCompressedCrcValue(compressedCrc32.getValue());
- entry.setHasCrc(true);
- if (additionalCountingStreams != null) {
- final long[] sizes = new long[additionalCountingStreams.length];
- for (int i = 0; i < additionalCountingStreams.length; i++) {
- sizes[i] = additionalCountingStreams[i].getBytesWritten();
- }
- additionalSizes.put(entry, sizes);
- }
- } else {
- entry.setHasStream(false);
- entry.setSize(0);
- entry.setCompressedSize(0);
- entry.setHasCrc(false);
- }
- currentOutputStream = null;
- additionalCountingStreams = null;
- crc32.reset();
- compressedCrc32.reset();
- fileBytesWritten = 0;
- }
-
- /**
- * Writes a byte to the current archive entry.
- * @param b The byte to be written.
- * @throws IOException on error
- */
- public void write(final int b) throws IOException {
- getCurrentOutputStream().write(b);
- }
-
- /**
- * Writes a byte array to the current archive entry.
- * @param b The byte array to be written.
- * @throws IOException on error
- */
- public void write(final byte[] b) throws IOException {
- write(b, 0, b.length);
- }
-
- /**
- * Writes part of a byte array to the current archive entry.
- * @param b The byte array to be written.
- * @param off offset into the array to start writing from
- * @param len number of bytes to write
- * @throws IOException on error
- */
- public void write(final byte[] b, final int off, final int len) throws IOException {
- if (len > 0) {
- getCurrentOutputStream().write(b, off, len);
- }
- }
-
- /**
- * Finishes the addition of entries to this archive, without closing it.
- *
- * @throws IOException if archive is already closed.
- */
- public void finish() throws IOException {
- if (finished) {
- throw new IOException("This archive has already been finished");
- }
- finished = true;
-
- final long headerPosition = channel.position();
-
- final ByteArrayOutputStream headerBaos = new ByteArrayOutputStream();
- final DataOutputStream header = new DataOutputStream(headerBaos);
-
- writeHeader(header);
- header.flush();
- final byte[] headerBytes = headerBaos.toByteArray();
- channel.write(ByteBuffer.wrap(headerBytes));
-
- final CRC32 crc32 = new CRC32();
- crc32.update(headerBytes);
-
- ByteBuffer bb = ByteBuffer.allocate(SevenZFile.sevenZSignature.length
- + 2 /* version */
- + 4 /* start header CRC */
- + 8 /* next header position */
- + 8 /* next header length */
- + 4 /* next header CRC */)
- .order(ByteOrder.LITTLE_ENDIAN);
- // signature header
- channel.position(0);
- bb.put(SevenZFile.sevenZSignature);
- // version
- bb.put((byte) 0).put((byte) 2);
-
- // placeholder for start header CRC
- bb.putInt(0);
-
- // start header
- bb.putLong(headerPosition - SevenZFile.SIGNATURE_HEADER_SIZE)
- .putLong(0xffffFFFFL & headerBytes.length)
- .putInt((int) crc32.getValue());
- crc32.reset();
- crc32.update(bb.array(), SevenZFile.sevenZSignature.length + 6, 20);
- bb.putInt(SevenZFile.sevenZSignature.length + 2, (int) crc32.getValue());
- bb.flip();
- channel.write(bb);
- }
-
- /*
- * Creation of output stream is deferred until data is actually
- * written as some codecs might write header information even for
- * empty streams and directories otherwise.
- */
- private OutputStream getCurrentOutputStream() throws IOException {
- if (currentOutputStream == null) {
- currentOutputStream = setupFileOutputStream();
- }
- return currentOutputStream;
- }
-
- private CountingOutputStream setupFileOutputStream() throws IOException {
- if (files.isEmpty()) {
- throw new IllegalStateException("No current 7z entry");
- }
-
- OutputStream out = new OutputStreamWrapper();
- final ArrayList<CountingOutputStream> moreStreams = new ArrayList<>();
- boolean first = true;
- for (final SevenZMethodConfiguration m : getContentMethods(files.get(files.size() - 1))) {
- if (!first) {
- final CountingOutputStream cos = new CountingOutputStream(out);
- moreStreams.add(cos);
- out = cos;
- }
- out = Coders.addEncoder(out, m.getMethod(), m.getOptions());
- first = false;
- }
- if (!moreStreams.isEmpty()) {
- additionalCountingStreams = moreStreams.toArray(new CountingOutputStream[moreStreams.size()]);
- }
- return new CountingOutputStream(out) {
- @Override
- public void write(final int b) throws IOException {
- super.write(b);
- crc32.update(b);
- }
-
- @Override
- public void write(final byte[] b) throws IOException {
- super.write(b);
- crc32.update(b);
- }
-
- @Override
- public void write(final byte[] b, final int off, final int len)
- throws IOException {
- super.write(b, off, len);
- crc32.update(b, off, len);
- }
- };
- }
-
- private Iterable<? extends SevenZMethodConfiguration> getContentMethods(final SevenZArchiveEntry entry) {
- final Iterable<? extends SevenZMethodConfiguration> ms = entry.getContentMethods();
- return ms == null ? contentMethods : ms;
- }
-
- private void writeHeader(final DataOutput header) throws IOException {
- header.write(NID.kHeader);
-
- header.write(NID.kMainStreamsInfo);
- writeStreamsInfo(header);
- writeFilesInfo(header);
- header.write(NID.kEnd);
- }
-
- private void writeStreamsInfo(final DataOutput header) throws IOException {
- if (numNonEmptyStreams > 0) {
- writePackInfo(header);
- writeUnpackInfo(header);
- }
-
- writeSubStreamsInfo(header);
-
- header.write(NID.kEnd);
- }
-
- private void writePackInfo(final DataOutput header) throws IOException {
- header.write(NID.kPackInfo);
-
- writeUint64(header, 0);
- writeUint64(header, 0xffffFFFFL & numNonEmptyStreams);
-
- header.write(NID.kSize);
- for (final SevenZArchiveEntry entry : files) {
- if (entry.hasStream()) {
- writeUint64(header, entry.getCompressedSize());
- }
- }
-
- header.write(NID.kCRC);
- header.write(1); // "allAreDefined" == true
- for (final SevenZArchiveEntry entry : files) {
- if (entry.hasStream()) {
- header.writeInt(Integer.reverseBytes((int) entry.getCompressedCrcValue()));
- }
- }
-
- header.write(NID.kEnd);
- }
-
- private void writeUnpackInfo(final DataOutput header) throws IOException {
- header.write(NID.kUnpackInfo);
-
- header.write(NID.kFolder);
- writeUint64(header, numNonEmptyStreams);
- header.write(0);
- for (final SevenZArchiveEntry entry : files) {
- if (entry.hasStream()) {
- writeFolder(header, entry);
- }
- }
-
- header.write(NID.kCodersUnpackSize);
- for (final SevenZArchiveEntry entry : files) {
- if (entry.hasStream()) {
- final long[] moreSizes = additionalSizes.get(entry);
- if (moreSizes != null) {
- for (final long s : moreSizes) {
- writeUint64(header, s);
- }
- }
- writeUint64(header, entry.getSize());
- }
- }
-
- header.write(NID.kCRC);
- header.write(1); // "allAreDefined" == true
- for (final SevenZArchiveEntry entry : files) {
- if (entry.hasStream()) {
- header.writeInt(Integer.reverseBytes((int) entry.getCrcValue()));
- }
- }
-
- header.write(NID.kEnd);
- }
-
- private void writeFolder(final DataOutput header, final SevenZArchiveEntry entry) throws IOException {
- final ByteArrayOutputStream bos = new ByteArrayOutputStream();
- int numCoders = 0;
- for (final SevenZMethodConfiguration m : getContentMethods(entry)) {
- numCoders++;
- writeSingleCodec(m, bos);
- }
-
- writeUint64(header, numCoders);
- header.write(bos.toByteArray());
- for (long i = 0; i < numCoders - 1; i++) {
- writeUint64(header, i + 1);
- writeUint64(header, i);
- }
- }
-
- private void writeSingleCodec(final SevenZMethodConfiguration m, final OutputStream bos) throws IOException {
- final byte[] id = m.getMethod().getId();
- final byte[] properties = Coders.findByMethod(m.getMethod())
- .getOptionsAsProperties(m.getOptions());
-
- int codecFlags = id.length;
- if (properties.length > 0) {
- codecFlags |= 0x20;
- }
- bos.write(codecFlags);
- bos.write(id);
-
- if (properties.length > 0) {
- bos.write(properties.length);
- bos.write(properties);
- }
- }
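
A tiny standalone sketch of the coder-flags byte composed above: the low bits carry the id length and 0x20 flags an attached properties blob, so LZMA2's one-byte id plus properties yields 0x21. The properties byte shown is a placeholder value.

class CodecFlagsDemo {
    public static void main(String[] args) {
        final byte[] id = { 0x21 };         // LZMA2's method id
        final byte[] properties = { 0x18 }; // placeholder properties blob
        int codecFlags = id.length;         // low bits: length of the id
        if (properties.length > 0) {
            codecFlags |= 0x20;             // bit 5: properties attached
        }
        System.out.printf("%02x%n", codecFlags); // 21
    }
}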
-
- private void writeSubStreamsInfo(final DataOutput header) throws IOException {
- header.write(NID.kSubStreamsInfo);
-//
-// header.write(NID.kCRC);
-// header.write(1);
-// for (final SevenZArchiveEntry entry : files) {
-// if (entry.getHasCrc()) {
-// header.writeInt(Integer.reverseBytes(entry.getCrc()));
-// }
-// }
-//
- header.write(NID.kEnd);
- }
-
- private void writeFilesInfo(final DataOutput header) throws IOException {
- header.write(NID.kFilesInfo);
-
- writeUint64(header, files.size());
-
- writeFileEmptyStreams(header);
- writeFileEmptyFiles(header);
- writeFileAntiItems(header);
- writeFileNames(header);
- writeFileCTimes(header);
- writeFileATimes(header);
- writeFileMTimes(header);
- writeFileWindowsAttributes(header);
- header.write(NID.kEnd);
- }
-
- private void writeFileEmptyStreams(final DataOutput header) throws IOException {
- boolean hasEmptyStreams = false;
- for (final SevenZArchiveEntry entry : files) {
- if (!entry.hasStream()) {
- hasEmptyStreams = true;
- break;
- }
- }
- if (hasEmptyStreams) {
- header.write(NID.kEmptyStream);
- final BitSet emptyStreams = new BitSet(files.size());
- for (int i = 0; i < files.size(); i++) {
- emptyStreams.set(i, !files.get(i).hasStream());
- }
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- final DataOutputStream out = new DataOutputStream(baos);
- writeBits(out, emptyStreams, files.size());
- out.flush();
- final byte[] contents = baos.toByteArray();
- writeUint64(header, contents.length);
- header.write(contents);
- }
- }
-
- private void writeFileEmptyFiles(final DataOutput header) throws IOException {
- boolean hasEmptyFiles = false;
- int emptyStreamCounter = 0;
- final BitSet emptyFiles = new BitSet(0);
- for (final SevenZArchiveEntry file1 : files) {
- if (!file1.hasStream()) {
- final boolean isDir = file1.isDirectory();
- emptyFiles.set(emptyStreamCounter++, !isDir);
- hasEmptyFiles |= !isDir;
- }
- }
- if (hasEmptyFiles) {
- header.write(NID.kEmptyFile);
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- final DataOutputStream out = new DataOutputStream(baos);
- writeBits(out, emptyFiles, emptyStreamCounter);
- out.flush();
- final byte[] contents = baos.toByteArray();
- writeUint64(header, contents.length);
- header.write(contents);
- }
- }
-
- private void writeFileAntiItems(final DataOutput header) throws IOException {
- boolean hasAntiItems = false;
- final BitSet antiItems = new BitSet(0);
- int antiItemCounter = 0;
- for (final SevenZArchiveEntry file1 : files) {
- if (!file1.hasStream()) {
- final boolean isAnti = file1.isAntiItem();
- antiItems.set(antiItemCounter++, isAnti);
- hasAntiItems |= isAnti;
- }
- }
- if (hasAntiItems) {
- header.write(NID.kAnti);
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- final DataOutputStream out = new DataOutputStream(baos);
- writeBits(out, antiItems, antiItemCounter);
- out.flush();
- final byte[] contents = baos.toByteArray();
- writeUint64(header, contents.length);
- header.write(contents);
- }
- }
-
- private void writeFileNames(final DataOutput header) throws IOException {
- header.write(NID.kName);
-
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- final DataOutputStream out = new DataOutputStream(baos);
- out.write(0);
- for (final SevenZArchiveEntry entry : files) {
- out.write(entry.getName().getBytes("UTF-16LE"));
- out.writeShort(0);
- }
- out.flush();
- final byte[] contents = baos.toByteArray();
- writeUint64(header, contents.length);
- header.write(contents);
- }
-
- private void writeFileCTimes(final DataOutput header) throws IOException {
- int numCreationDates = 0;
- for (final SevenZArchiveEntry entry : files) {
- if (entry.getHasCreationDate()) {
- ++numCreationDates;
- }
- }
- if (numCreationDates > 0) {
- header.write(NID.kCTime);
-
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- final DataOutputStream out = new DataOutputStream(baos);
- if (numCreationDates != files.size()) {
- out.write(0);
- final BitSet cTimes = new BitSet(files.size());
- for (int i = 0; i < files.size(); i++) {
- cTimes.set(i, files.get(i).getHasCreationDate());
- }
- writeBits(out, cTimes, files.size());
- } else {
- out.write(1); // "allAreDefined" == true
- }
- out.write(0);
- for (final SevenZArchiveEntry entry : files) {
- if (entry.getHasCreationDate()) {
- out.writeLong(Long.reverseBytes(
- SevenZArchiveEntry.javaTimeToNtfsTime(entry.getCreationDate())));
- }
- }
- out.flush();
- final byte[] contents = baos.toByteArray();
- writeUint64(header, contents.length);
- header.write(contents);
- }
- }
-
- private void writeFileATimes(final DataOutput header) throws IOException {
- int numAccessDates = 0;
- for (final SevenZArchiveEntry entry : files) {
- if (entry.getHasAccessDate()) {
- ++numAccessDates;
- }
- }
- if (numAccessDates > 0) {
- header.write(NID.kATime);
-
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- final DataOutputStream out = new DataOutputStream(baos);
- if (numAccessDates != files.size()) {
- out.write(0);
- final BitSet aTimes = new BitSet(files.size());
- for (int i = 0; i < files.size(); i++) {
- aTimes.set(i, files.get(i).getHasAccessDate());
- }
- writeBits(out, aTimes, files.size());
- } else {
- out.write(1); // "allAreDefined" == true
- }
- out.write(0);
- for (final SevenZArchiveEntry entry : files) {
- if (entry.getHasAccessDate()) {
- out.writeLong(Long.reverseBytes(
- SevenZArchiveEntry.javaTimeToNtfsTime(entry.getAccessDate())));
- }
- }
- out.flush();
- final byte[] contents = baos.toByteArray();
- writeUint64(header, contents.length);
- header.write(contents);
- }
- }
-
- private void writeFileMTimes(final DataOutput header) throws IOException {
- int numLastModifiedDates = 0;
- for (final SevenZArchiveEntry entry : files) {
- if (entry.getHasLastModifiedDate()) {
- ++numLastModifiedDates;
- }
- }
- if (numLastModifiedDates > 0) {
- header.write(NID.kMTime);
-
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- final DataOutputStream out = new DataOutputStream(baos);
- if (numLastModifiedDates != files.size()) {
- out.write(0);
- final BitSet mTimes = new BitSet(files.size());
- for (int i = 0; i < files.size(); i++) {
- mTimes.set(i, files.get(i).getHasLastModifiedDate());
- }
- writeBits(out, mTimes, files.size());
- } else {
- out.write(1); // "allAreDefined" == true
- }
- out.write(0);
- for (final SevenZArchiveEntry entry : files) {
- if (entry.getHasLastModifiedDate()) {
- out.writeLong(Long.reverseBytes(
- SevenZArchiveEntry.javaTimeToNtfsTime(entry.getLastModifiedDate())));
- }
- }
- out.flush();
- final byte[] contents = baos.toByteArray();
- writeUint64(header, contents.length);
- header.write(contents);
- }
- }
-
- private void writeFileWindowsAttributes(final DataOutput header) throws IOException {
- int numWindowsAttributes = 0;
- for (final SevenZArchiveEntry entry : files) {
- if (entry.getHasWindowsAttributes()) {
- ++numWindowsAttributes;
- }
- }
- if (numWindowsAttributes > 0) {
- header.write(NID.kWinAttributes);
-
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- final DataOutputStream out = new DataOutputStream(baos);
- if (numWindowsAttributes != files.size()) {
- out.write(0);
- final BitSet attributes = new BitSet(files.size());
- for (int i = 0; i < files.size(); i++) {
- attributes.set(i, files.get(i).getHasWindowsAttributes());
- }
- writeBits(out, attributes, files.size());
- } else {
- out.write(1); // "allAreDefined" == true
- }
- out.write(0);
- for (final SevenZArchiveEntry entry : files) {
- if (entry.getHasWindowsAttributes()) {
- out.writeInt(Integer.reverseBytes(entry.getWindowsAttributes()));
- }
- }
- out.flush();
- final byte[] contents = baos.toByteArray();
- writeUint64(header, contents.length);
- header.write(contents);
- }
- }
-
- private void writeUint64(final DataOutput header, long value) throws IOException {
- int firstByte = 0;
- int mask = 0x80;
- int i;
- for (i = 0; i < 8; i++) {
- if (value < ((1L << ( 7 * (i + 1))))) {
- firstByte |= (value >>> (8 * i));
- break;
- }
- firstByte |= mask;
- mask >>>= 1;
- }
- header.write(firstByte);
- for (; i > 0; i--) {
- header.write((int) (0xff & value));
- value >>>= 8;
- }
- }
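
Standalone sketch of this encoder, the inverse of SevenZFile's readUint64: the value 320 needs one continuation bit and encodes to { 0x81, 0x40 }. Class and method names are made up; the logic mirrors the method above.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class Uint64EncodeDemo {
    static void encode(DataOutputStream out, long value) throws IOException {
        int firstByte = 0;
        int mask = 0x80;
        int i;
        for (i = 0; i < 8; i++) {
            if (value < (1L << (7 * (i + 1)))) {
                firstByte |= (value >>> (8 * i)); // leftover high bits of the value
                break;
            }
            firstByte |= mask; // one continuation bit per payload byte
            mask >>>= 1;
        }
        out.write(firstByte);
        for (; i > 0; i--) {
            out.write((int) (0xff & value)); // payload bytes, little-endian
            value >>>= 8;
        }
    }

    public static void main(String[] args) throws IOException {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        encode(new DataOutputStream(baos), 320);
        for (final byte b : baos.toByteArray()) {
            System.out.printf("%02x ", b); // 81 40
        }
    }
}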
-
- private void writeBits(final DataOutput header, final BitSet bits, final int length) throws IOException {
- int cache = 0;
- int shift = 7;
- for (int i = 0; i < length; i++) {
- cache |= ((bits.get(i) ? 1 : 0) << shift);
- if (--shift < 0) {
- header.write(cache);
- shift = 7;
- cache = 0;
- }
- }
- if (shift != 7) {
- header.write(cache);
- }
- }
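
A worked example of the MSB-first packing above, using a hypothetical helper that packs only the first output byte: bits 0 and 2 of a 3-bit vector land in a single byte as 0xa0.

import java.util.BitSet;

class WriteBitsDemo {
    static int packFirstByte(BitSet bits, int length) {
        int cache = 0;
        int shift = 7; // first bit goes into the most significant position
        for (int i = 0; i < length && shift >= 0; i++) {
            cache |= (bits.get(i) ? 1 : 0) << shift--;
        }
        return cache;
    }

    public static void main(String[] args) {
        final BitSet bits = new BitSet();
        bits.set(0);
        bits.set(2);
        System.out.printf("%02x%n", packFirstByte(bits, 3)); // a0
    }
}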
-
- private static <T> Iterable<T> reverse(final Iterable<T> i) {
- final LinkedList<T> l = new LinkedList<>();
- for (final T t : i) {
- l.addFirst(t);
- }
- return l;
- }
-
- private class OutputStreamWrapper extends OutputStream {
- private static final int BUF_SIZE = 8192;
- private final ByteBuffer buffer = ByteBuffer.allocate(BUF_SIZE);
- @Override
- public void write(final int b) throws IOException {
- buffer.clear();
- buffer.put((byte) b).flip();
- channel.write(buffer);
- compressedCrc32.update(b);
- fileBytesWritten++;
- }
-
- @Override
- public void write(final byte[] b) throws IOException {
- OutputStreamWrapper.this.write(b, 0, b.length);
- }
-
- @Override
- public void write(final byte[] b, final int off, final int len)
- throws IOException {
- if (len > BUF_SIZE) {
- channel.write(ByteBuffer.wrap(b, off, len));
- } else {
- buffer.clear();
- buffer.put(b, off, len).flip();
- channel.write(buffer);
- }
- compressedCrc32.update(b, off, len);
- fileBytesWritten += len;
- }
-
- @Override
- public void flush() throws IOException {
- // no reason to flush the channel
- }
-
- @Override
- public void close() throws IOException {
- // the file will be closed by the containing class's close method
- }
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/StartHeader.java b/src/org/apache/commons/compress/archivers/sevenz/StartHeader.java
deleted file mode 100644
index a33aca70fa8..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/StartHeader.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-class StartHeader {
- long nextHeaderOffset;
- long nextHeaderSize;
- long nextHeaderCrc;
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/StreamMap.java b/src/org/apache/commons/compress/archivers/sevenz/StreamMap.java
deleted file mode 100644
index 9a10e1e847e..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/StreamMap.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-/// Map between folders, files and streams.
-class StreamMap {
- /// The first Archive.packStream index of each folder.
- int[] folderFirstPackStreamIndex;
- /// Offset to beginning of this pack stream's data, relative to the beginning of the first pack stream.
- long[] packStreamOffsets;
- /// Index of first file for each folder.
- int[] folderFirstFileIndex;
- /// Index of folder for each file.
- int[] fileFolderIndex;
-
- @Override
- public String toString() {
- return "StreamMap with indices of " + folderFirstPackStreamIndex.length
- + " folders, offsets of " + packStreamOffsets.length + " packed streams,"
- + " first files of " + folderFirstFileIndex.length + " folders and"
- + " folder indices for " + fileFolderIndex.length + " files";
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/SubStreamsInfo.java b/src/org/apache/commons/compress/archivers/sevenz/SubStreamsInfo.java
deleted file mode 100644
index 95fabc635d3..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/SubStreamsInfo.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.sevenz;
-
-import java.util.BitSet;
-
-/// Properties for non-empty files.
-class SubStreamsInfo {
- /// Unpacked size of each unpacked stream.
- long[] unpackSizes;
- /// Whether CRC is present for each unpacked stream.
- BitSet hasCrc;
- /// CRCs of unpacked streams, if present.
- long[] crcs;
-}
diff --git a/src/org/apache/commons/compress/archivers/sevenz/package.html b/src/org/apache/commons/compress/archivers/sevenz/package.html
deleted file mode 100644
index 975703b3850..00000000000
--- a/src/org/apache/commons/compress/archivers/sevenz/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-<html>
-<body>
-<p>Provides classes for reading and writing archives using
- the 7z format.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/archivers/tar/TarArchiveEntry.java b/src/org/apache/commons/compress/archivers/tar/TarArchiveEntry.java
deleted file mode 100644
index ac98f0afa2f..00000000000
--- a/src/org/apache/commons/compress/archivers/tar/TarArchiveEntry.java
+++ /dev/null
@@ -1,1440 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.tar;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-import org.apache.commons.compress.utils.ArchiveUtils;
-
-/**
- * This class represents an entry in a Tar archive. It consists
- * of the entry's header, as well as the entry's File. Entries
- * can be instantiated in one of three ways, depending on how
- * they are to be used.
- *
- * TarEntries that are created from the header bytes read from
- * an archive are instantiated with the TarEntry( byte[] )
- * constructor. These entries will be used when extracting from
- * or listing the contents of an archive. These entries have their
- * header filled in using the header bytes. They also set the File
- * to null, since they reference an archive entry not a file.
- *
- * TarEntries that are created from Files that are to be written
- * into an archive are instantiated with the TarEntry( File )
- * constructor. These entries have their header filled in using
- * the File's information. They also keep a reference to the File
- * for convenience when writing entries.
- *
- * Finally, TarEntries can be constructed from nothing but a name.
- * This allows the programmer to construct the entry by hand, for
- * instance when only an InputStream is available for writing to
- * the archive, and the header information is constructed from
- * other information. In this case the header fields are set to
- * defaults and the File is set to null.
- *
- *
- * The C structure for a Tar Entry's header is:
- * <pre>
- * struct header {
- * char name[100]; // TarConstants.NAMELEN - offset 0
- * char mode[8]; // TarConstants.MODELEN - offset 100
- * char uid[8]; // TarConstants.UIDLEN - offset 108
- * char gid[8]; // TarConstants.GIDLEN - offset 116
- * char size[12]; // TarConstants.SIZELEN - offset 124
- * char mtime[12]; // TarConstants.MODTIMELEN - offset 136
- * char chksum[8]; // TarConstants.CHKSUMLEN - offset 148
- * char linkflag[1]; // - offset 156
- * char linkname[100]; // TarConstants.NAMELEN - offset 157
- * The following fields are only present in new-style POSIX tar archives:
- * char magic[6]; // TarConstants.MAGICLEN - offset 257
- * char version[2]; // TarConstants.VERSIONLEN - offset 263
- * char uname[32]; // TarConstants.UNAMELEN - offset 265
- * char gname[32]; // TarConstants.GNAMELEN - offset 297
- * char devmajor[8]; // TarConstants.DEVLEN - offset 329
- * char devminor[8]; // TarConstants.DEVLEN - offset 337
- * char prefix[155]; // TarConstants.PREFIXLEN - offset 345
- * // Used if "name" field is not long enough to hold the path
- * char pad[12]; // NULs - offset 500
- * } header;
- * All unused bytes are set to null.
- * New-style GNU tar files are slightly different from the above.
- * For values of size larger than 077777777777L (11 7s)
- * or uid and gid larger than 07777777L (7 7s)
- * the sign bit of the first byte is set, and the rest of the
- * field is the binary representation of the number.
- * See TarUtils.parseOctalOrBinary.
- * </pre>
- *
- * <p>
- * The C structure for an old GNU Tar Entry's header is
- * identical to new-style POSIX up to the first 130 bytes of the prefix.
- *
- * @NotThreadSafe
- */
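
To make the header layout above concrete, a standalone sketch (not this class's actual parsing code, which lives in TarUtils) that pulls the NUL-terminated name at offset 0 and the ASCII-octal size field at offset 124 out of a raw 512-byte header. Names and sample values are made up.

import java.nio.charset.StandardCharsets;

class TarHeaderDemo {
    static String name(byte[] header) {
        int end = 0;
        while (end < 100 && header[end] != 0) { // name[100] at offset 0, NUL-terminated
            end++;
        }
        return new String(header, 0, end, StandardCharsets.US_ASCII);
    }

    static long size(byte[] header) {
        // size[12] at offset 124: octal digits, padded/terminated with spaces or NULs
        final String field = new String(header, 124, 12, StandardCharsets.US_ASCII).trim();
        return field.isEmpty() ? 0 : Long.parseLong(field, 8);
    }

    public static void main(String[] args) {
        final byte[] header = new byte[512];
        final byte[] n = "hello.txt".getBytes(StandardCharsets.US_ASCII);
        System.arraycopy(n, 0, header, 0, n.length);
        final byte[] s = "00000000144".getBytes(StandardCharsets.US_ASCII); // octal 144 == 100
        System.arraycopy(s, 0, header, 124, s.length);
        System.out.println(name(header) + ", " + size(header) + " bytes"); // hello.txt, 100 bytes
    }
}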
-
-public class TarArchiveEntry implements ArchiveEntry, TarConstants {
- private static final TarArchiveEntry[] EMPTY_TAR_ARCHIVE_ENTRIES = new TarArchiveEntry[0];
-
- /** The entry's name. */
- private String name = "";
-
- /** Whether to allow leading slashes or drive names inside the name */
- private final boolean preserveAbsolutePath;
-
- /** The entry's permission mode. */
- private int mode;
-
- /** The entry's user id. */
- private long userId = 0;
-
- /** The entry's group id. */
- private long groupId = 0;
-
- /** The entry's size. */
- private long size = 0;
-
- /** The entry's modification time. */
- private long modTime;
-
- /** If the header checksum is reasonably correct. */
- private boolean checkSumOK;
-
- /** The entry's link flag. */
- private byte linkFlag;
-
- /** The entry's link name. */
- private String linkName = "";
-
- /** The entry's magic tag. */
- private String magic = MAGIC_POSIX;
- /** The version of the format */
- private String version = VERSION_POSIX;
-
- /** The entry's user name. */
- private String userName;
-
- /** The entry's group name. */
- private String groupName = "";
-
- /** The entry's major device number. */
- private int devMajor = 0;
-
- /** The entry's minor device number. */
- private int devMinor = 0;
-
- /** If an extension sparse header follows. */
- private boolean isExtended;
-
- /** The entry's real size in case of a sparse file. */
- private long realSize;
-
- /** is this entry a GNU sparse entry using one of the PAX formats? */
- private boolean paxGNUSparse;
-
- /** is this entry a star sparse entry using the PAX header? */
- private boolean starSparse;
-
- /** The entry's file reference */
- private final File file;
-
- /** Extra, user supplied pax headers */
- private final Map<String, String> extraPaxHeaders = new HashMap<>();
-
- /** Maximum length of a user's name in the tar file */
- public static final int MAX_NAMELEN = 31;
-
- /** Default permissions bits for directories */
- public static final int DEFAULT_DIR_MODE = 040755;
-
- /** Default permissions bits for files */
- public static final int DEFAULT_FILE_MODE = 0100644;
-
- /** Convert millis to seconds */
- public static final int MILLIS_PER_SECOND = 1000;
-
-
- /**
- * Construct an empty entry and prepares the header values.
- */
- private TarArchiveEntry(boolean preserveAbsolutePath) {
- String user = System.getProperty("user.name", "");
-
- if (user.length() > MAX_NAMELEN) {
- user = user.substring(0, MAX_NAMELEN);
- }
-
- this.userName = user;
- this.file = null;
- this.preserveAbsolutePath = preserveAbsolutePath;
- }
-
- /**
- * Construct an entry with only a name. This allows the programmer
- * to construct the entry's header "by hand". File is set to null.
- *
- * <p>The entry's name will be the value of the {@code name}
- * argument with all file separators replaced by forward slashes
- * and leading slashes as well as Windows drive letters stripped.
- *
- * @param name the entry name
- */
- public TarArchiveEntry(final String name) {
- this(name, false);
- }
-
- /**
- * Construct an entry with only a name. This allows the programmer
- * to construct the entry's header "by hand". File is set to null.
- *
- * <p>The entry's name will be the value of the {@code name}
- * argument with all file separators replaced by forward slashes.
- * Leading slashes and Windows drive letters are stripped if
- * {@code preserveAbsolutePath} is {@code false}.
- *
- * @param name the entry name
- * @param preserveAbsolutePath whether to allow leading slashes
- * or drive letters in the name.
- *
- * @since 1.1
- */
- public TarArchiveEntry(String name, final boolean preserveAbsolutePath) {
- this(preserveAbsolutePath);
-
- name = normalizeFileName(name, preserveAbsolutePath);
- final boolean isDir = name.endsWith("/");
-
- this.name = name;
- this.mode = isDir ? DEFAULT_DIR_MODE : DEFAULT_FILE_MODE;
- this.linkFlag = isDir ? LF_DIR : LF_NORMAL;
- this.modTime = new Date().getTime() / MILLIS_PER_SECOND;
- this.userName = "";
- }
-
- /**
- * Construct an entry with a name and a link flag.
- *
- * <p>The entry's name will be the value of the {@code name}
- * argument with all file separators replaced by forward slashes
- * and leading slashes as well as Windows drive letters
- * stripped.
- *
- * @param name the entry name
- * @param linkFlag the entry link flag.
- */
- public TarArchiveEntry(final String name, final byte linkFlag) {
- this(name, linkFlag, false);
- }
-
- /**
- * Construct an entry with a name and a link flag.
- *
- * <p>The entry's name will be the value of the {@code name}
- * argument with all file separators replaced by forward slashes.
- * Leading slashes and Windows drive letters are stripped if
- * {@code preserveAbsolutePath} is {@code false}.
- *
- * @param name the entry name
- * @param linkFlag the entry link flag.
- * @param preserveAbsolutePath whether to allow leading slashes
- * or drive letters in the name.
- *
- * @since 1.5
- */
- public TarArchiveEntry(final String name, final byte linkFlag, final boolean preserveAbsolutePath) {
- this(name, preserveAbsolutePath);
- this.linkFlag = linkFlag;
- if (linkFlag == LF_GNUTYPE_LONGNAME) {
- magic = MAGIC_GNU;
- version = VERSION_GNU_SPACE;
- }
- }
-
- /**
- * Construct an entry for a file. File is set to file, and the
- * header is constructed from information from the file.
- * The name is set from the normalized file path.
- *
- * <p>The entry's name will be the value of the {@code file}'s
- * path with all file separators replaced by forward slashes and
- * leading slashes as well as Windows drive letters stripped. The
- * name will end in a slash if the {@code file} represents a
- * directory.
- *
- * @param file The file that the entry represents.
- */
- public TarArchiveEntry(final File file) {
- this(file, file.getPath());
- }
-
- /**
- * Construct an entry for a file. File is set to file, and the
- * header is constructed from information from the file.
- *
- * <p>The entry's name will be the value of the {@code fileName}
- * argument with all file separators replaced by forward slashes
- * and leading slashes as well as Windows drive letters stripped.
- * The name will end in a slash if the {@code file} represents a
- * directory.
- *
- * @param file The file that the entry represents.
- * @param fileName the name to be used for the entry.
- */
- public TarArchiveEntry(final File file, final String fileName) {
- final String normalizedName = normalizeFileName(fileName, false);
- this.file = file;
-
- if (file.isDirectory()) {
- this.mode = DEFAULT_DIR_MODE;
- this.linkFlag = LF_DIR;
-
- final int nameLength = normalizedName.length();
- if (nameLength == 0 || normalizedName.charAt(nameLength - 1) != '/') {
- this.name = normalizedName + "/";
- } else {
- this.name = normalizedName;
- }
- } else {
- this.mode = DEFAULT_FILE_MODE;
- this.linkFlag = LF_NORMAL;
- this.size = file.length();
- this.name = normalizedName;
- }
-
- this.modTime = file.lastModified() / MILLIS_PER_SECOND;
- this.userName = "";
- preserveAbsolutePath = false;
- }
-
- /**
- * Construct an entry from an archive's header bytes. File is set
- * to null.
- *
- * @param headerBuf The header bytes from a tar archive entry.
- * @throws IllegalArgumentException if any of the numeric fields have an invalid format
- */
- public TarArchiveEntry(final byte[] headerBuf) {
- this(false);
- parseTarHeader(headerBuf);
- }
-
- /**
- * Construct an entry from an archive's header bytes. File is set
- * to null.
- *
- * @param headerBuf The header bytes from a tar archive entry.
- * @param encoding encoding to use for file names
- * @since 1.4
- * @throws IllegalArgumentException if any of the numeric fields have an invalid format
- * @throws IOException on error
- */
- public TarArchiveEntry(final byte[] headerBuf, final ZipEncoding encoding)
- throws IOException {
- this(false);
- parseTarHeader(headerBuf, encoding);
- }
-
- /**
- * Determine if the two entries are equal. Equality is determined
- * by the header names being equal.
- *
- * @param it Entry to be checked for equality.
- * @return True if the entries are equal.
- */
- public boolean equals(final TarArchiveEntry it) {
- return it != null && getName().equals(it.getName());
- }
-
- /**
- * Determine if the two entries are equal. Equality is determined
- * by the header names being equal.
- *
- * @param it Entry to be checked for equality.
- * @return True if the entries are equal.
- */
- @Override
- public boolean equals(final Object it) {
- if (it == null || getClass() != it.getClass()) {
- return false;
- }
- return equals((TarArchiveEntry) it);
- }
-
- /**
- * Hashcodes are based on entry names.
- *
- * @return the entry hashcode
- */
- @Override
- public int hashCode() {
- return getName().hashCode();
- }
-
- /**
- * Determine if the given entry is a descendant of this entry.
- * Descendancy is determined by the name of the descendant
- * starting with this entry's name.
- *
- * @param desc Entry to be checked as a descendant of this.
- * @return True if entry is a descendant of this.
- */
- public boolean isDescendent(final TarArchiveEntry desc) {
- return desc.getName().startsWith(getName());
- }
-
- /**
- * Get this entry's name.
- *
- * <p>This method returns the raw name as it is stored inside of the archive.
- *
- * @return This entry's name.
- */
- @Override
- public String getName() {
- return name;
- }
-
- /**
- * Set this entry's name.
- *
- * @param name This entry's new name.
- */
- public void setName(final String name) {
- this.name = normalizeFileName(name, this.preserveAbsolutePath);
- }
-
- /**
- * Set the mode for this entry
- *
- * @param mode the mode for this entry
- */
- public void setMode(final int mode) {
- this.mode = mode;
- }
-
- /**
- * Get this entry's link name.
- *
- * @return This entry's link name.
- */
- public String getLinkName() {
- return linkName;
- }
-
- /**
- * Set this entry's link name.
- *
- * @param link the link name to use.
- *
- * @since 1.1
- */
- public void setLinkName(final String link) {
- this.linkName = link;
- }
-
- /**
- * Get this entry's user id.
- *
- * @return This entry's user id.
- * @deprecated use #getLongUserId instead as user ids can be
- * bigger than {@link Integer#MAX_VALUE}
- */
- @Deprecated
- public int getUserId() {
- return (int) (userId & 0xffffffff);
- }
-
- /**
- * Set this entry's user id.
- *
- * @param userId This entry's new user id.
- */
- public void setUserId(final int userId) {
- setUserId((long) userId);
- }
-
- /**
- * Get this entry's user id.
- *
- * @return This entry's user id.
- * @since 1.10
- */
- public long getLongUserId() {
- return userId;
- }
-
- /**
- * Set this entry's user id.
- *
- * @param userId This entry's new user id.
- * @since 1.10
- */
- public void setUserId(final long userId) {
- this.userId = userId;
- }
-
- /**
- * Get this entry's group id.
- *
- * @return This entry's group id.
- * @deprecated use #getLongGroupId instead as group ids can be
- * bigger than {@link Integer#MAX_VALUE}
- */
- @Deprecated
- public int getGroupId() {
- return (int) (groupId & 0xffffffff);
- }
-
- /**
- * Set this entry's group id.
- *
- * @param groupId This entry's new group id.
- */
- public void setGroupId(final int groupId) {
- setGroupId((long) groupId);
- }
-
- /**
- * Get this entry's group id.
- *
- * @since 1.10
- * @return This entry's group id.
- */
- public long getLongGroupId() {
- return groupId;
- }
-
- /**
- * Set this entry's group id.
- *
- * @since 1.10
- * @param groupId This entry's new group id.
- */
- public void setGroupId(final long groupId) {
- this.groupId = groupId;
- }
-
- /**
- * Get this entry's user name.
- *
- * @return This entry's user name.
- */
- public String getUserName() {
- return userName;
- }
-
- /**
- * Set this entry's user name.
- *
- * @param userName This entry's new user name.
- */
- public void setUserName(final String userName) {
- this.userName = userName;
- }
-
- /**
- * Get this entry's group name.
- *
- * @return This entry's group name.
- */
- public String getGroupName() {
- return groupName;
- }
-
- /**
- * Set this entry's group name.
- *
- * @param groupName This entry's new group name.
- */
- public void setGroupName(final String groupName) {
- this.groupName = groupName;
- }
-
- /**
- * Convenience method to set this entry's group and user ids.
- *
- * @param userId This entry's new user id.
- * @param groupId This entry's new group id.
- */
- public void setIds(final int userId, final int groupId) {
- setUserId(userId);
- setGroupId(groupId);
- }
-
- /**
- * Convenience method to set this entry's group and user names.
- *
- * @param userName This entry's new user name.
- * @param groupName This entry's new group name.
- */
- public void setNames(final String userName, final String groupName) {
- setUserName(userName);
- setGroupName(groupName);
- }
-
- /**
- * Set this entry's modification time. The parameter passed
- * to this method is in "Java time".
- *
- * @param time This entry's new modification time.
- */
- public void setModTime(final long time) {
- modTime = time / MILLIS_PER_SECOND;
- }
-
- /**
- * Set this entry's modification time.
- *
- * @param time This entry's new modification time.
- */
- public void setModTime(final Date time) {
- modTime = time.getTime() / MILLIS_PER_SECOND;
- }
-
- /**
- * Get this entry's modification time.
- *
- * @return This entry's modification time.
- */
- public Date getModTime() {
- return new Date(modTime * MILLIS_PER_SECOND);
- }
-
- @Override
- public Date getLastModifiedDate() {
- return getModTime();
- }
-
- /**
- * Get this entry's checksum status.
- *
- * @return if the header checksum is reasonably correct
- * @see TarUtils#verifyCheckSum(byte[])
- * @since 1.5
- */
- public boolean isCheckSumOK() {
- return checkSumOK;
- }
-
- /**
- * Get this entry's file.
- *
- * <p>This method is only useful for entries created from a {@code
- * File} but not for entries read from an archive.
- *
- * @return This entry's file.
- */
- public File getFile() {
- return file;
- }
-
- /**
- * Get this entry's mode.
- *
- * @return This entry's mode.
- */
- public int getMode() {
- return mode;
- }
-
- /**
- * Get this entry's file size.
- *
- * @return This entry's file size.
- */
- @Override
- public long getSize() {
- return size;
- }
-
- /**
- * Set this entry's file size.
- *
- * @param size This entry's new file size.
- * @throws IllegalArgumentException if the size is < 0.
- */
- public void setSize(final long size) {
- if (size < 0){
- throw new IllegalArgumentException("Size is out of range: "+size);
- }
- this.size = size;
- }
-
- /**
- * Get this entry's major device number.
- *
- * @return This entry's major device number.
- * @since 1.4
- */
- public int getDevMajor() {
- return devMajor;
- }
-
- /**
- * Set this entry's major device number.
- *
- * @param devNo This entry's major device number.
- * @throws IllegalArgumentException if the devNo is < 0.
- * @since 1.4
- */
- public void setDevMajor(final int devNo) {
- if (devNo < 0){
- throw new IllegalArgumentException("Major device number is out of "
- + "range: " + devNo);
- }
- this.devMajor = devNo;
- }
-
- /**
- * Get this entry's minor device number.
- *
- * @return This entry's minor device number.
- * @since 1.4
- */
- public int getDevMinor() {
- return devMinor;
- }
-
- /**
- * Set this entry's minor device number.
- *
- * @param devNo This entry's minor device number.
- * @throws IllegalArgumentException if the devNo is < 0.
- * @since 1.4
- */
- public void setDevMinor(final int devNo) {
- if (devNo < 0){
- throw new IllegalArgumentException("Minor device number is out of "
- + "range: " + devNo);
- }
- this.devMinor = devNo;
- }
-
- /**
- * Indicates, in case of an oldgnu sparse file, whether an extension
- * sparse header follows.
- *
- * @return true if an extension oldgnu sparse header follows.
- */
- public boolean isExtended() {
- return isExtended;
- }
-
- /**
- * Get this entry's real file size in case of a sparse file.
- *
- * @return This entry's real file size.
- */
- public long getRealSize() {
- return realSize;
- }
-
- /**
- * Indicate if this entry is a GNU sparse block.
- *
- * @return true if this is a sparse extension provided by GNU tar
- */
- public boolean isGNUSparse() {
- return isOldGNUSparse() || isPaxGNUSparse();
- }
-
- /**
- * Indicate if this entry is a GNU or star sparse block using the
- * oldgnu format.
- *
- * @return true if this is a sparse extension provided by GNU tar or star
- * @since 1.11
- */
- public boolean isOldGNUSparse() {
- return linkFlag == LF_GNUTYPE_SPARSE;
- }
-
- /**
- * Indicate if this entry is a GNU sparse block using one of the
- * PAX formats.
- *
- * @return true if this is a sparse extension provided by GNU tar
- * @since 1.11
- */
- public boolean isPaxGNUSparse() {
- return paxGNUSparse;
- }
-
- /**
- * Indicate if this entry is a star sparse block using PAX headers.
- *
- * @return true if this is a sparse extension provided by star
- * @since 1.11
- */
- public boolean isStarSparse() {
- return starSparse;
- }
-
- /**
- * Indicate if this entry is a GNU long linkname block
- *
- * @return true if this is a long link name extension provided by GNU tar
- */
- public boolean isGNULongLinkEntry() {
- return linkFlag == LF_GNUTYPE_LONGLINK;
- }
-
- /**
- * Indicate if this entry is a GNU long name block
- *
- * @return true if this is a long name extension provided by GNU tar
- */
- public boolean isGNULongNameEntry() {
- return linkFlag == LF_GNUTYPE_LONGNAME;
- }
-
- /**
- * Check if this is a Pax header.
- *
- * @return {@code true} if this is a Pax header.
- *
- * @since 1.1
- *
- */
- public boolean isPaxHeader() {
- return linkFlag == LF_PAX_EXTENDED_HEADER_LC
- || linkFlag == LF_PAX_EXTENDED_HEADER_UC;
- }
-
- /**
- * Check if this is a Pax global extended header.
- *
- * @return {@code true} if this is a Pax global extended header.
- *
- * @since 1.1
- */
- public boolean isGlobalPaxHeader() {
- return linkFlag == LF_PAX_GLOBAL_EXTENDED_HEADER;
- }
-
- /**
- * Return whether or not this entry represents a directory.
- *
- * @return True if this entry is a directory.
- */
- @Override
- public boolean isDirectory() {
- if (file != null) {
- return file.isDirectory();
- }
-
- if (linkFlag == LF_DIR) {
- return true;
- }
-
- return !isPaxHeader() && !isGlobalPaxHeader() && getName().endsWith("/");
- }
-
- /**
- * Check if this is a "normal file"
- *
- * @since 1.2
- * @return whether this is a "normal file"
- */
- public boolean isFile() {
- if (file != null) {
- return file.isFile();
- }
- if (linkFlag == LF_OLDNORM || linkFlag == LF_NORMAL) {
- return true;
- }
- return !getName().endsWith("/");
- }
-
- /**
- * Check if this is a symbolic link entry.
- *
- * @since 1.2
- * @return whether this is a symbolic link
- */
- public boolean isSymbolicLink() {
- return linkFlag == LF_SYMLINK;
- }
-
- /**
- * Check if this is a link entry.
- *
- * @since 1.2
- * @return whether this is a link entry
- */
- public boolean isLink() {
- return linkFlag == LF_LINK;
- }
-
- /**
- * Check if this is a character device entry.
- *
- * @since 1.2
- * @return whether this is a character device
- */
- public boolean isCharacterDevice() {
- return linkFlag == LF_CHR;
- }
-
- /**
- * Check if this is a block device entry.
- *
- * @since 1.2
- * @return whether this is a block device
- */
- public boolean isBlockDevice() {
- return linkFlag == LF_BLK;
- }
-
- /**
- * Check if this is a FIFO (pipe) entry.
- *
- * @since 1.2
- * @return whether this is a FIFO entry
- */
- public boolean isFIFO() {
- return linkFlag == LF_FIFO;
- }
-
- /**
- * Check whether this is a sparse entry.
- *
- * @return whether this is a sparse entry
- * @since 1.11
- */
- public boolean isSparse() {
- return isGNUSparse() || isStarSparse();
- }
-
- /**
- * get extra PAX Headers
- * @return read-only map containing any extra PAX Headers
- * @since 1.15
- */
- public Map<String, String> getExtraPaxHeaders() {
- return Collections.unmodifiableMap(extraPaxHeaders);
- }
-
- /**
- * clear all extra PAX headers.
- * @since 1.15
- */
- public void clearExtraPaxHeaders() {
- extraPaxHeaders.clear();
- }
-
- /**
- * add a PAX header to this entry. If the header corresponds to an existing field in the entry,
- * that field will be set; otherwise the header will be added to the extraPaxHeaders Map
- * @param name The full name of the header to set.
- * @param value value of header.
- * @since 1.15
- */
- public void addPaxHeader(String name,String value) {
- processPaxHeader(name,value);
- }
-
- /**
- * get named extra PAX header
- * @param name The full name of an extended PAX header to retrieve
- * @return The value of the header, if any.
- * @since 1.15
- */
- public String getExtraPaxHeader(String name) {
- return extraPaxHeaders.get(name);
- }
-
- /**
- * Update the entry using a map of pax headers.
- * @param headers the pax headers to apply to this entry
- * @since 1.15
- */
- void updateEntryFromPaxHeaders(Map<String, String> headers) {
- for (final Map.Entry<String, String> ent : headers.entrySet()) {
- final String key = ent.getKey();
- final String val = ent.getValue();
- processPaxHeader(key, val, headers);
- }
- }
-
- /**
- * Process one pax header, using the entry's extraPaxHeaders map as source for extra headers
- * used when handling entries for sparse files.
- * @param key the header name.
- * @param val the header value.
- * @since 1.15
- */
- private void processPaxHeader(String key, String val) {
- processPaxHeader(key,val,extraPaxHeaders);
- }
-
- /**
- * Process one pax header, using the supplied map as source for extra headers to be used when handling
- * entries for sparse files
- *
- * @param key the header name.
- * @param val the header value.
- * @param headers map of headers used for dealing with sparse file.
- * @since 1.15
- */
- private void processPaxHeader(String key, String val, Map<String, String> headers) {
- /*
- * The following headers are defined for Pax.
- * atime, ctime, charset: cannot use these without changing TarArchiveEntry fields
- * mtime
- * comment
- * gid, gname
- * linkpath
- * size
- * uid,uname
- * SCHILY.devminor, SCHILY.devmajor: don't have setters/getters for those
- *
- * GNU sparse files use additional members, we use
- * GNU.sparse.size to detect the 0.0 and 0.1 versions and
- * GNU.sparse.realsize for 1.0.
- *
- * star files use additional members of which we use
- * SCHILY.filetype in order to detect star sparse files.
- *
- * If called from addPaxHeader, these additional headers must already be present.
- */
- switch (key) {
- case "path":
- setName(val);
- break;
- case "linkpath":
- setLinkName(val);
- break;
- case "gid":
- setGroupId(Long.parseLong(val));
- break;
- case "gname":
- setGroupName(val);
- break;
- case "uid":
- setUserId(Long.parseLong(val));
- break;
- case "uname":
- setUserName(val);
- break;
- case "size":
- setSize(Long.parseLong(val));
- break;
- case "mtime":
- setModTime((long) (Double.parseDouble(val) * 1000));
- break;
- case "SCHILY.devminor":
- setDevMinor(Integer.parseInt(val));
- break;
- case "SCHILY.devmajor":
- setDevMajor(Integer.parseInt(val));
- break;
- case "GNU.sparse.size":
- fillGNUSparse0xData(headers);
- break;
- case "GNU.sparse.realsize":
- fillGNUSparse1xData(headers);
- break;
- case "SCHILY.filetype":
- if ("sparse".equals(val)) {
- fillStarSparseData(headers);
- }
- break;
- default:
- extraPaxHeaders.put(key,val);
- }
- }
-
-
-
- /**
- * If this entry represents a file, and the file is a directory, return
- * an array of TarEntries for this entry's children.
- *
- * <p>This method is only useful for entries created from a {@code
- * File} but not for entries read from an archive.
- *
- * @return An array of TarArchiveEntry objects for this entry's children.
- */
- public TarArchiveEntry[] getDirectoryEntries() {
- if (file == null || !file.isDirectory()) {
- return EMPTY_TAR_ARCHIVE_ENTRIES;
- }
-
- final String[] list = file.list();
- if (list == null) {
- return EMPTY_TAR_ARCHIVE_ENTRIES;
- }
- final TarArchiveEntry[] result = new TarArchiveEntry[list.length];
-
- for (int i = 0; i < result.length; ++i) {
- result[i] = new TarArchiveEntry(new File(file, list[i]));
- }
-
- return result;
- }
-
- /**
- * Write an entry's header information to a header buffer.
- *
- * <p>This method does not use the star/GNU tar/BSD tar extensions.
- *
- * @param outbuf The tar entry header buffer to fill in.
- */
- public void writeEntryHeader(final byte[] outbuf) {
- try {
- writeEntryHeader(outbuf, TarUtils.DEFAULT_ENCODING, false);
- } catch (final IOException ex) {
- try {
- writeEntryHeader(outbuf, TarUtils.FALLBACK_ENCODING, false);
- } catch (final IOException ex2) {
- // impossible
- throw new RuntimeException(ex2); //NOSONAR
- }
- }
- }
-
- /**
- * Write an entry's header information to a header buffer.
- *
- * @param outbuf The tar entry header buffer to fill in.
- * @param encoding encoding to use when writing the file name.
- * @param starMode whether to use the star/GNU tar/BSD tar
- * extension for numeric fields if their value doesn't fit in the
- * maximum size of standard tar archives
- * @since 1.4
- * @throws IOException on error
- */
- public void writeEntryHeader(final byte[] outbuf, final ZipEncoding encoding,
- final boolean starMode) throws IOException {
- int offset = 0;
-
- offset = TarUtils.formatNameBytes(name, outbuf, offset, NAMELEN,
- encoding);
- offset = writeEntryHeaderField(mode, outbuf, offset, MODELEN, starMode);
- offset = writeEntryHeaderField(userId, outbuf, offset, UIDLEN,
- starMode);
- offset = writeEntryHeaderField(groupId, outbuf, offset, GIDLEN,
- starMode);
- offset = writeEntryHeaderField(size, outbuf, offset, SIZELEN, starMode);
- offset = writeEntryHeaderField(modTime, outbuf, offset, MODTIMELEN,
- starMode);
-
- final int csOffset = offset;
-
- for (int c = 0; c < CHKSUMLEN; ++c) {
- outbuf[offset++] = (byte) ' ';
- }
-
- outbuf[offset++] = linkFlag;
- offset = TarUtils.formatNameBytes(linkName, outbuf, offset, NAMELEN,
- encoding);
- offset = TarUtils.formatNameBytes(magic, outbuf, offset, MAGICLEN);
- offset = TarUtils.formatNameBytes(version, outbuf, offset, VERSIONLEN);
- offset = TarUtils.formatNameBytes(userName, outbuf, offset, UNAMELEN,
- encoding);
- offset = TarUtils.formatNameBytes(groupName, outbuf, offset, GNAMELEN,
- encoding);
- offset = writeEntryHeaderField(devMajor, outbuf, offset, DEVLEN,
- starMode);
- offset = writeEntryHeaderField(devMinor, outbuf, offset, DEVLEN,
- starMode);
-
- while (offset < outbuf.length) {
- outbuf[offset++] = 0;
- }
-
- final long chk = TarUtils.computeCheckSum(outbuf);
-
- TarUtils.formatCheckSumOctalBytes(chk, outbuf, csOffset, CHKSUMLEN);
- }
-
- private int writeEntryHeaderField(final long value, final byte[] outbuf, final int offset,
- final int length, final boolean starMode) {
- if (!starMode && (value < 0
- || value >= 1L << 3 * (length - 1))) {
- // value doesn't fit into field when written as octal
- // number, will be written to PAX header or causes an
- // error
- return TarUtils.formatLongOctalBytes(0, outbuf, offset, length);
- }
- return TarUtils.formatLongOctalOrBinaryBytes(value, outbuf, offset,
- length);
- }
-
- /**
- * Parse an entry's header information from a header buffer.
- *
- * @param header The tar entry header buffer to get information from.
- * @throws IllegalArgumentException if any of the numeric fields have an invalid format
- */
- public void parseTarHeader(final byte[] header) {
- try {
- parseTarHeader(header, TarUtils.DEFAULT_ENCODING);
- } catch (final IOException ex) {
- try {
- parseTarHeader(header, TarUtils.DEFAULT_ENCODING, true);
- } catch (final IOException ex2) {
- // not really possible
- throw new RuntimeException(ex2); //NOSONAR
- }
- }
- }
-
- /**
- * Parse an entry's header information from a header buffer.
- *
- * @param header The tar entry header buffer to get information from.
- * @param encoding encoding to use for file names
- * @since 1.4
- * @throws IllegalArgumentException if any of the numeric fields
- * have an invalid format
- * @throws IOException on error
- */
- public void parseTarHeader(final byte[] header, final ZipEncoding encoding)
- throws IOException {
- parseTarHeader(header, encoding, false);
- }
-
- private void parseTarHeader(final byte[] header, final ZipEncoding encoding,
- final boolean oldStyle)
- throws IOException {
- int offset = 0;
-
- name = oldStyle ? TarUtils.parseName(header, offset, NAMELEN)
- : TarUtils.parseName(header, offset, NAMELEN, encoding);
- offset += NAMELEN;
- mode = (int) TarUtils.parseOctalOrBinary(header, offset, MODELEN);
- offset += MODELEN;
- userId = (int) TarUtils.parseOctalOrBinary(header, offset, UIDLEN);
- offset += UIDLEN;
- groupId = (int) TarUtils.parseOctalOrBinary(header, offset, GIDLEN);
- offset += GIDLEN;
- size = TarUtils.parseOctalOrBinary(header, offset, SIZELEN);
- offset += SIZELEN;
- modTime = TarUtils.parseOctalOrBinary(header, offset, MODTIMELEN);
- offset += MODTIMELEN;
- checkSumOK = TarUtils.verifyCheckSum(header);
- offset += CHKSUMLEN;
- linkFlag = header[offset++];
- linkName = oldStyle ? TarUtils.parseName(header, offset, NAMELEN)
- : TarUtils.parseName(header, offset, NAMELEN, encoding);
- offset += NAMELEN;
- magic = TarUtils.parseName(header, offset, MAGICLEN);
- offset += MAGICLEN;
- version = TarUtils.parseName(header, offset, VERSIONLEN);
- offset += VERSIONLEN;
- userName = oldStyle ? TarUtils.parseName(header, offset, UNAMELEN)
- : TarUtils.parseName(header, offset, UNAMELEN, encoding);
- offset += UNAMELEN;
- groupName = oldStyle ? TarUtils.parseName(header, offset, GNAMELEN)
- : TarUtils.parseName(header, offset, GNAMELEN, encoding);
- offset += GNAMELEN;
- if (linkFlag == LF_CHR || linkFlag == LF_BLK) {
- devMajor = (int) TarUtils.parseOctalOrBinary(header, offset, DEVLEN);
- offset += DEVLEN;
- devMinor = (int) TarUtils.parseOctalOrBinary(header, offset, DEVLEN);
- offset += DEVLEN;
- } else {
- offset += 2 * DEVLEN;
- }
-
- final int type = evaluateType(header);
- switch (type) {
- case FORMAT_OLDGNU: {
- offset += ATIMELEN_GNU;
- offset += CTIMELEN_GNU;
- offset += OFFSETLEN_GNU;
- offset += LONGNAMESLEN_GNU;
- offset += PAD2LEN_GNU;
- offset += SPARSELEN_GNU;
- isExtended = TarUtils.parseBoolean(header, offset);
- offset += ISEXTENDEDLEN_GNU;
- realSize = TarUtils.parseOctal(header, offset, REALSIZELEN_GNU);
- offset += REALSIZELEN_GNU; // NOSONAR - assignment as documentation
- break;
- }
- case FORMAT_XSTAR: {
- final String xstarPrefix = oldStyle
- ? TarUtils.parseName(header, offset, PREFIXLEN_XSTAR)
- : TarUtils.parseName(header, offset, PREFIXLEN_XSTAR, encoding);
- if (xstarPrefix.length() > 0) {
- name = xstarPrefix + "/" + name;
- }
- break;
- }
- case FORMAT_POSIX:
- default: {
- final String prefix = oldStyle
- ? TarUtils.parseName(header, offset, PREFIXLEN)
- : TarUtils.parseName(header, offset, PREFIXLEN, encoding);
- // SunOS tar -E does not add / to directory names, so fix
- // up to be consistent
- if (isDirectory() && !name.endsWith("/")){
- name = name + "/";
- }
- if (prefix.length() > 0){
- name = prefix + "/" + name;
- }
- }
- }
- }
-
- /**
- * Strips Windows' drive letter as well as any leading slashes,
- * turns path separators into forward slashes.
- */
- private static String normalizeFileName(String fileName,
- final boolean preserveAbsolutePath) {
- if (!preserveAbsolutePath) {
- final String osname = System.getProperty("os.name").toLowerCase(Locale.ENGLISH);
-
- if (osname != null) {
-
- // Strip off drive letters!
- // REVIEW Would a better check be "(File.separator == '\')"?
-
- if (osname.startsWith("windows")) {
- if (fileName.length() > 2) {
- final char ch1 = fileName.charAt(0);
- final char ch2 = fileName.charAt(1);
-
- if (ch2 == ':'
- && (ch1 >= 'a' && ch1 <= 'z'
- || ch1 >= 'A' && ch1 <= 'Z')) {
- fileName = fileName.substring(2);
- }
- }
- } else if (osname.contains("netware")) {
- final int colon = fileName.indexOf(':');
- if (colon != -1) {
- fileName = fileName.substring(colon + 1);
- }
- }
- }
- }
-
- fileName = fileName.replace(File.separatorChar, '/');
-
- // No absolute pathnames
- // Windows (and Posix?) paths can start with "\\NetworkDrive\",
- // so we loop on starting /'s.
- while (!preserveAbsolutePath && fileName.startsWith("/")) {
- fileName = fileName.substring(1);
- }
- return fileName;
- }
-
- /**
- * Evaluate an entry's header format from a header buffer.
- *
- * @param header The tar entry header buffer to evaluate the format for.
- * @return format type
- */
- private int evaluateType(final byte[] header) {
- if (ArchiveUtils.matchAsciiBuffer(MAGIC_GNU, header, MAGIC_OFFSET, MAGICLEN)) {
- return FORMAT_OLDGNU;
- }
- if (ArchiveUtils.matchAsciiBuffer(MAGIC_POSIX, header, MAGIC_OFFSET, MAGICLEN)) {
- if (ArchiveUtils.matchAsciiBuffer(MAGIC_XSTAR, header, XSTAR_MAGIC_OFFSET,
- XSTAR_MAGIC_LEN)) {
- return FORMAT_XSTAR;
- }
- return FORMAT_POSIX;
- }
- return 0;
- }
-
- void fillGNUSparse0xData(final Map<String, String> headers) {
- paxGNUSparse = true;
- realSize = Integer.parseInt(headers.get("GNU.sparse.size"));
- if (headers.containsKey("GNU.sparse.name")) {
- // version 0.1
- name = headers.get("GNU.sparse.name");
- }
- }
-
- void fillGNUSparse1xData(final Map<String, String> headers) {
- paxGNUSparse = true;
- realSize = Integer.parseInt(headers.get("GNU.sparse.realsize"));
- name = headers.get("GNU.sparse.name");
- }
-
- void fillStarSparseData(final Map<String, String> headers) {
- starSparse = true;
- if (headers.containsKey("SCHILY.realsize")) {
- realSize = Long.parseLong(headers.get("SCHILY.realsize"));
- }
- }
-}
-
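For orientation: the PAX-header handling removed by this patch routes well-known keywords ("uid", "gid", "mtime", "path", ...) into typed entry fields via processPaxHeader(), while unrecognised keywords stay in the extra-headers map. A minimal sketch of that behaviour, assuming the commons-compress 1.15-era API this vendored copy tracked (class and file names are illustrative):

    import java.util.Map;
    import org.apache.commons.compress.archivers.tar.TarArchiveEntry;

    public final class PaxHeaderDemo {
        public static void main(String[] args) {
            TarArchiveEntry entry = new TarArchiveEntry("data/report.txt");
            // A recognised key is dispatched to the matching setter...
            entry.addPaxHeader("uid", "1000");
            // ...an unrecognised key lands in the extra-headers map.
            entry.addPaxHeader("comment", "generated nightly");
            Map<String, String> extra = entry.getExtraPaxHeaders();
            System.out.println(entry.getLongUserId());  // 1000
            System.out.println(extra.get("comment"));   // generated nightly
        }
    }
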
diff --git a/src/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java b/src/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java
deleted file mode 100644
index daaf729f264..00000000000
--- a/src/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java
+++ /dev/null
@@ -1,714 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/*
- * This package is based on the work done by Timothy Gerard Endres
- * (time@ice.com) to whom the Ant project is very grateful for his great code.
- */
-
-package org.apache.commons.compress.archivers.tar;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
-import org.apache.commons.compress.utils.ArchiveUtils;
-import org.apache.commons.compress.utils.CharsetNames;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * The TarInputStream reads a UNIX tar archive as an InputStream.
- * Methods are provided to position at each successive entry in
- * the archive, and then read each entry as a normal input stream
- * using read().
- * @NotThreadSafe
- */
-public class TarArchiveInputStream extends ArchiveInputStream {
-
- private static final int SMALL_BUFFER_SIZE = 256;
-
- private final byte[] smallBuf = new byte[SMALL_BUFFER_SIZE];
-
- /** The size of the TAR header */
- private final int recordSize;
-
- /** The size of a block */
- private final int blockSize;
-
- /** True if file has hit EOF */
- private boolean hasHitEOF;
-
- /** Size of the current entry */
- private long entrySize;
-
- /** How far into the entry the stream is */
- private long entryOffset;
-
- /** An input stream to read from */
- private final InputStream is;
-
- /** The meta-data about the current entry */
- private TarArchiveEntry currEntry;
-
- /** The encoding of the file */
- private final ZipEncoding zipEncoding;
-
- // the provided encoding (for unit tests)
- final String encoding;
-
- // the global PAX header
- private Map<String, String> globalPaxHeaders = new HashMap<>();
-
- /**
- * Constructor for TarInputStream.
- * @param is the input stream to use
- */
- public TarArchiveInputStream(final InputStream is) {
- this(is, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE);
- }
-
- /**
- * Constructor for TarInputStream.
- * @param is the input stream to use
- * @param encoding name of the encoding to use for file names
- * @since 1.4
- */
- public TarArchiveInputStream(final InputStream is, final String encoding) {
- this(is, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE,
- encoding);
- }
-
- /**
- * Constructor for TarInputStream.
- * @param is the input stream to use
- * @param blockSize the block size to use
- */
- public TarArchiveInputStream(final InputStream is, final int blockSize) {
- this(is, blockSize, TarConstants.DEFAULT_RCDSIZE);
- }
-
- /**
- * Constructor for TarInputStream.
- * @param is the input stream to use
- * @param blockSize the block size to use
- * @param encoding name of the encoding to use for file names
- * @since 1.4
- */
- public TarArchiveInputStream(final InputStream is, final int blockSize,
- final String encoding) {
- this(is, blockSize, TarConstants.DEFAULT_RCDSIZE, encoding);
- }
-
- /**
- * Constructor for TarInputStream.
- * @param is the input stream to use
- * @param blockSize the block size to use
- * @param recordSize the record size to use
- */
- public TarArchiveInputStream(final InputStream is, final int blockSize, final int recordSize) {
- this(is, blockSize, recordSize, null);
- }
-
- /**
- * Constructor for TarInputStream.
- * @param is the input stream to use
- * @param blockSize the block size to use
- * @param recordSize the record size to use
- * @param encoding name of the encoding to use for file names
- * @since 1.4
- */
- public TarArchiveInputStream(final InputStream is, final int blockSize, final int recordSize,
- final String encoding) {
- this.is = is;
- this.hasHitEOF = false;
- this.encoding = encoding;
- this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
- this.recordSize = recordSize;
- this.blockSize = blockSize;
- }
-
- /**
- * Closes this stream and the underlying input stream.
- * @throws IOException on error
- */
- @Override
- public void close() throws IOException {
- is.close();
- }
-
- /**
- * Get the record size being used by this stream's buffer.
- *
- * @return The TarBuffer record size.
- */
- public int getRecordSize() {
- return recordSize;
- }
-
- /**
- * Get the available data that can be read from the current
- * entry in the archive. This does not indicate how much data
- * is left in the entire archive, only in the current entry.
- * This value is determined from the entry's size header field
- * and the amount of data already read from the current entry.
- * Integer.MAX_VALUE is returned in case more than Integer.MAX_VALUE
- * bytes are left in the current entry in the archive.
- *
- * @return The number of available bytes for the current entry.
- * @throws IOException never thrown here; declared to match the InputStream signature
- */
- @Override
- public int available() throws IOException {
- if (isDirectory()) {
- return 0;
- }
- if (entrySize - entryOffset > Integer.MAX_VALUE) {
- return Integer.MAX_VALUE;
- }
- return (int) (entrySize - entryOffset);
- }
-
-
- /**
- * Skips over and discards n bytes of data from this input
- * stream. The skip method may, for a variety of reasons, end
- * up skipping over some smaller number of bytes, possibly 0.
- * This may result from any of a number of conditions; reaching end of file
- * or end of entry before n bytes have been skipped are only
- * two possibilities. The actual number of bytes skipped is returned. If
- * n is negative, no bytes are skipped.
- *
- *
- * @param n
- * the number of bytes to be skipped.
- * @return the actual number of bytes skipped.
- * @throws IOException
- * if some other I/O error occurs.
- */
- @Override
- public long skip(final long n) throws IOException {
- if (n <= 0 || isDirectory()) {
- return 0;
- }
-
- final long available = entrySize - entryOffset;
- final long skipped = IOUtils.skip(is, Math.min(n, available));
- count(skipped);
- entryOffset += skipped;
- return skipped;
- }
-
- /**
- * Since we do not support marking just yet, we return false.
- *
- * @return False.
- */
- @Override
- public boolean markSupported() {
- return false;
- }
-
- /**
- * Since we do not support marking just yet, we do nothing.
- *
- * @param markLimit The limit to mark.
- */
- @Override
- public void mark(final int markLimit) {
- }
-
- /**
- * Since we do not support marking just yet, we do nothing.
- */
- @Override
- public synchronized void reset() {
- }
-
- /**
- * Get the next entry in this tar archive. This will skip
- * over any remaining data in the current entry, if there
- * is one, place the input stream at the header of the
- * next entry, read the header, and instantiate and return
- * a new TarEntry from the header bytes.
- * If there are no more entries in the archive, null will
- * be returned to indicate that the end of the archive has
- * been reached.
- *
- * @return The next TarEntry in the archive, or null.
- * @throws IOException on error
- */
- public TarArchiveEntry getNextTarEntry() throws IOException {
- if (isAtEOF()) {
- return null;
- }
-
- if (currEntry != null) {
- /* Skip will only go to the end of the current entry */
- IOUtils.skip(this, Long.MAX_VALUE);
-
- /* skip to the end of the last record */
- skipRecordPadding();
- }
-
- final byte[] headerBuf = getRecord();
-
- if (headerBuf == null) {
- /* hit EOF */
- currEntry = null;
- return null;
- }
-
- try {
- currEntry = new TarArchiveEntry(headerBuf, zipEncoding);
- } catch (final IllegalArgumentException e) {
- throw new IOException("Error detected parsing the header", e);
- }
-
- entryOffset = 0;
- entrySize = currEntry.getSize();
-
- if (currEntry.isGNULongLinkEntry()) {
- final byte[] longLinkData = getLongNameData();
- if (longLinkData == null) {
- // Bugzilla: 40334
- // Malformed tar file - long link entry name not followed by
- // entry
- return null;
- }
- currEntry.setLinkName(zipEncoding.decode(longLinkData));
- }
-
- if (currEntry.isGNULongNameEntry()) {
- final byte[] longNameData = getLongNameData();
- if (longNameData == null) {
- // Bugzilla: 40334
- // Malformed tar file - long entry name not followed by
- // entry
- return null;
- }
- currEntry.setName(zipEncoding.decode(longNameData));
- }
-
- if (currEntry.isGlobalPaxHeader()){ // Process Global Pax headers
- readGlobalPaxHeaders();
- }
-
- if (currEntry.isPaxHeader()){ // Process Pax headers
- paxHeaders();
- } else if (!globalPaxHeaders.isEmpty()) {
- applyPaxHeadersToCurrentEntry(globalPaxHeaders);
- }
-
- if (currEntry.isOldGNUSparse()){ // Process sparse files
- readOldGNUSparse();
- }
-
- // If the size of the next element in the archive has changed
- // due to a new size being reported in the posix header
- // information, we update entrySize here so that it contains
- // the correct value.
- entrySize = currEntry.getSize();
-
- return currEntry;
- }
-
- /**
- * The last record block should be written at the full size, so skip any
- * additional space used to fill a record after an entry
- */
- private void skipRecordPadding() throws IOException {
- if (!isDirectory() && this.entrySize > 0 && this.entrySize % this.recordSize != 0) {
- final long numRecords = (this.entrySize / this.recordSize) + 1;
- final long padding = (numRecords * this.recordSize) - this.entrySize;
- final long skipped = IOUtils.skip(is, padding);
- count(skipped);
- }
- }
-
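For concreteness, the padding arithmetic above with the default 512-byte records: a 1000-byte entry occupies numRecords = 1000 / 512 + 1 = 2 records, so skipRecordPadding() skips padding = 2 * 512 - 1000 = 24 bytes to land on the next header.
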
- /**
- * Get the next entry in this tar archive as longname data.
- *
- * @return The next entry in the archive as longname data, or null.
- * @throws IOException on error
- */
- protected byte[] getLongNameData() throws IOException {
- // read in the name
- final ByteArrayOutputStream longName = new ByteArrayOutputStream();
- int length = 0;
- while ((length = read(smallBuf)) >= 0) {
- longName.write(smallBuf, 0, length);
- }
- getNextEntry();
- if (currEntry == null) {
- // Bugzilla: 40334
- // Malformed tar file - long entry name not followed by entry
- return null;
- }
- byte[] longNameData = longName.toByteArray();
- // remove trailing null terminator(s)
- length = longNameData.length;
- while (length > 0 && longNameData[length - 1] == 0) {
- --length;
- }
- if (length != longNameData.length) {
- final byte[] l = new byte[length];
- System.arraycopy(longNameData, 0, l, 0, length);
- longNameData = l;
- }
- return longNameData;
- }
-
- /**
- * Get the next record in this tar archive. This will skip
- * over any remaining data in the current entry, if there
- * is one, and place the input stream at the header of the
- * next entry.
- *
- * <p>If there are no more entries in the archive, null will be
- * returned to indicate that the end of the archive has been
- * reached. At the same time the {@code hasHitEOF} marker will be
- * set to true.
- *
- * @return The next header in the archive, or null.
- * @throws IOException on error
- */
- private byte[] getRecord() throws IOException {
- byte[] headerBuf = readRecord();
- setAtEOF(isEOFRecord(headerBuf));
- if (isAtEOF() && headerBuf != null) {
- tryToConsumeSecondEOFRecord();
- consumeRemainderOfLastBlock();
- headerBuf = null;
- }
- return headerBuf;
- }
-
- /**
- * Determine if an archive record indicates End of Archive. End of
- * archive is indicated by a record that consists entirely of null bytes.
- *
- * @param record The record data to check.
- * @return true if the record data is an End of Archive
- */
- protected boolean isEOFRecord(final byte[] record) {
- return record == null || ArchiveUtils.isArrayZero(record, recordSize);
- }
-
- /**
- * Read a record from the input stream and return the data.
- *
- * @return The record data or null if EOF has been hit.
- * @throws IOException on error
- */
- protected byte[] readRecord() throws IOException {
-
- final byte[] record = new byte[recordSize];
-
- final int readNow = IOUtils.readFully(is, record);
- count(readNow);
- if (readNow != recordSize) {
- return null;
- }
-
- return record;
- }
-
- private void readGlobalPaxHeaders() throws IOException {
- globalPaxHeaders = parsePaxHeaders(this);
- getNextEntry(); // Get the actual file entry
- }
-
- private void paxHeaders() throws IOException{
- final Map<String, String> headers = parsePaxHeaders(this);
- getNextEntry(); // Get the actual file entry
- applyPaxHeadersToCurrentEntry(headers);
- }
-
- // NOTE, using a Map here makes it impossible to ever support GNU
- // sparse files using the PAX Format 0.0, see
- // https://www.gnu.org/software/tar/manual/html_section/tar_92.html#SEC188
- Map<String, String> parsePaxHeaders(final InputStream i)
- throws IOException {
- final Map<String, String> headers = new HashMap<>(globalPaxHeaders);
- // Format is "length keyword=value\n";
- while(true){ // get length
- int ch;
- int len = 0;
- int read = 0;
- while((ch = i.read()) != -1) {
- read++;
- if (ch == '\n') { // blank line in header
- break;
- } else if (ch == ' '){ // End of length string
- // Get keyword
- final ByteArrayOutputStream coll = new ByteArrayOutputStream();
- while((ch = i.read()) != -1) {
- read++;
- if (ch == '='){ // end of keyword
- final String keyword = coll.toString(CharsetNames.UTF_8);
- // Get rest of entry
- final int restLen = len - read;
- if (restLen == 1) { // only NL
- headers.remove(keyword);
- } else {
- final byte[] rest = new byte[restLen];
- final int got = IOUtils.readFully(i, rest);
- if (got != restLen) {
- throw new IOException("Failed to read "
- + "Paxheader. Expected "
- + restLen
- + " bytes, read "
- + got);
- }
- // Drop trailing NL
- final String value = new String(rest, 0,
- restLen - 1, CharsetNames.UTF_8);
- headers.put(keyword, value);
- }
- break;
- }
- coll.write((byte) ch);
- }
- break; // Processed single header
- }
- len *= 10;
- len += ch - '0';
- }
- if (ch == -1){ // EOF
- break;
- }
- }
- return headers;
- }
-
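The loop above decodes PAX records of the form "length keyword=value\n", where the decimal length counts the entire record, including its own digits, the separating space and the trailing newline. A minimal sketch of the encoding side, for illustration only (class and helper names are hypothetical):

    import java.nio.charset.StandardCharsets;

    public final class PaxRecordDemo {
        // Builds one "length keyword=value\n" record.
        static String paxRecord(String keyword, String value) {
            String body = " " + keyword + "=" + value + "\n";
            int len = body.getBytes(StandardCharsets.UTF_8).length;
            int total = String.valueOf(len).length() + len;
            // Prepending the length digits can itself grow the record by one
            // digit, so adjust once more if that happened.
            if (String.valueOf(total).length() != String.valueOf(len).length()) {
                total++;
            }
            return total + body;
        }

        public static void main(String[] args) {
            // Prints "30 mtime=1321711775.972059463"
            System.out.print(paxRecord("mtime", "1321711775.972059463"));
        }
    }
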
- private void applyPaxHeadersToCurrentEntry(final Map<String, String> headers) {
- currEntry.updateEntryFromPaxHeaders(headers);
-
- }
-
- /**
- * Adds the sparse chunks from the current entry to the sparse chunks,
- * including any additional sparse entries following the current entry.
- *
- * @throws IOException on error
- *
- * @todo Sparse files are not really processed yet.
- */
- private void readOldGNUSparse() throws IOException {
- /* we do not really process sparse files yet
- sparses = new ArrayList();
- sparses.addAll(currEntry.getSparses());
- */
- if (currEntry.isExtended()) {
- TarArchiveSparseEntry entry;
- do {
- final byte[] headerBuf = getRecord();
- if (headerBuf == null) {
- currEntry = null;
- break;
- }
- entry = new TarArchiveSparseEntry(headerBuf);
- /* we do not really process sparse files yet
- sparses.addAll(entry.getSparses());
- */
- } while (entry.isExtended());
- }
- }
-
- private boolean isDirectory() {
- return currEntry != null && currEntry.isDirectory();
- }
-
- /**
- * Returns the next Archive Entry in this Stream.
- *
- * @return the next entry,
- * or {@code null} if there are no more entries
- * @throws IOException if the next entry could not be read
- */
- @Override
- public ArchiveEntry getNextEntry() throws IOException {
- return getNextTarEntry();
- }
-
- /**
- * Tries to read the next record, rewinding the stream if it is not an EOF record.
- *
- * <p>This is meant to protect against cases where a tar
- * implementation has written only one EOF record when two are
- * expected. Actually this won't help since a non-conforming
- * implementation likely won't fill full blocks consisting of - by
- * default - ten records either so we probably have already read
- * beyond the archive anyway.
- */
- private void tryToConsumeSecondEOFRecord() throws IOException {
- boolean shouldReset = true;
- final boolean marked = is.markSupported();
- if (marked) {
- is.mark(recordSize);
- }
- try {
- shouldReset = !isEOFRecord(readRecord());
- } finally {
- if (shouldReset && marked) {
- pushedBackBytes(recordSize);
- is.reset();
- }
- }
- }
-
- /**
- * Reads bytes from the current tar archive entry.
- *
- * This method is aware of the boundaries of the current
- * entry in the archive and will deal with them as if they
- * were this stream's start and EOF.
- *
- * @param buf The buffer into which to place bytes read.
- * @param offset The offset at which to place bytes read.
- * @param numToRead The number of bytes to read.
- * @return The number of bytes read, or -1 at EOF.
- * @throws IOException on error
- */
- @Override
- public int read(final byte[] buf, final int offset, int numToRead) throws IOException {
- int totalRead = 0;
-
- if (isAtEOF() || isDirectory() || entryOffset >= entrySize) {
- return -1;
- }
-
- if (currEntry == null) {
- throw new IllegalStateException("No current tar entry");
- }
-
- numToRead = Math.min(numToRead, available());
-
- totalRead = is.read(buf, offset, numToRead);
-
- if (totalRead == -1) {
- if (numToRead > 0) {
- throw new IOException("Truncated TAR archive");
- }
- setAtEOF(true);
- } else {
- count(totalRead);
- entryOffset += totalRead;
- }
-
- return totalRead;
- }
-
- /**
- * Whether this class is able to read the given entry.
- *
- * <p>May return false if the current entry is a sparse file.
- */
- @Override
- public boolean canReadEntryData(final ArchiveEntry ae) {
- if (ae instanceof TarArchiveEntry) {
- final TarArchiveEntry te = (TarArchiveEntry) ae;
- return !te.isSparse();
- }
- return false;
- }
-
- /**
- * Get the current TAR Archive Entry that this input stream is processing
- *
- * @return The current Archive Entry
- */
- public TarArchiveEntry getCurrentEntry() {
- return currEntry;
- }
-
- protected final void setCurrentEntry(final TarArchiveEntry e) {
- currEntry = e;
- }
-
- protected final boolean isAtEOF() {
- return hasHitEOF;
- }
-
- protected final void setAtEOF(final boolean b) {
- hasHitEOF = b;
- }
-
- /**
- * This method is invoked once the end of the archive is hit, it
- * tries to consume the remaining bytes under the assumption that
- * the tool creating this archive has padded the last block.
- */
- private void consumeRemainderOfLastBlock() throws IOException {
- final long bytesReadOfLastBlock = getBytesRead() % blockSize;
- if (bytesReadOfLastBlock > 0) {
- final long skipped = IOUtils.skip(is, blockSize - bytesReadOfLastBlock);
- count(skipped);
- }
- }
-
- /**
- * Checks if the signature matches what is expected for a tar file.
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is a tar archive stream, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
- if (length < TarConstants.VERSION_OFFSET+TarConstants.VERSIONLEN) {
- return false;
- }
-
- if (ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_POSIX,
- signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN)
- &&
- ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_POSIX,
- signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN)
- ){
- return true;
- }
- if (ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_GNU,
- signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN)
- &&
- (
- ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_GNU_SPACE,
- signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN)
- ||
- ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_GNU_ZERO,
- signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN)
- )
- ){
- return true;
- }
- // COMPRESS-107 - recognise Ant tar files
- return ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_ANT,
- signature, TarConstants.MAGIC_OFFSET, TarConstants.MAGICLEN)
- &&
- ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_ANT,
- signature, TarConstants.VERSION_OFFSET, TarConstants.VERSIONLEN);
- }
-
-}
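As context for the stream removed by this patch, the usual consumption pattern is a getNextTarEntry() loop; a minimal sketch (the archive name is illustrative):

    import java.io.FileInputStream;
    import java.io.IOException;
    import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
    import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;

    public final class TarListDemo {
        public static void main(String[] args) throws IOException {
            // getNextTarEntry() skips any unread bytes of the previous entry,
            // plus its record padding, before parsing the next header.
            try (TarArchiveInputStream in =
                    new TarArchiveInputStream(new FileInputStream("backup.tar"))) {
                TarArchiveEntry entry;
                while ((entry = in.getNextTarEntry()) != null) {
                    System.out.printf("%s (%d bytes)%n", entry.getName(), entry.getSize());
                }
            }
        }
    }
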
diff --git a/src/org/apache/commons/compress/archivers/tar/TarArchiveOutputStream.java b/src/org/apache/commons/compress/archivers/tar/TarArchiveOutputStream.java
deleted file mode 100644
index 382f06fe185..00000000000
--- a/src/org/apache/commons/compress/archivers/tar/TarArchiveOutputStream.java
+++ /dev/null
@@ -1,697 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.tar;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.StringWriter;
-import java.io.UnsupportedEncodingException;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
-import org.apache.commons.compress.utils.CharsetNames;
-import org.apache.commons.compress.utils.CountingOutputStream;
-import org.apache.commons.compress.utils.FixedLengthBlockOutputStream;
-
-/**
- * The TarOutputStream writes a UNIX tar archive as an OutputStream. Methods are provided to put
- * entries, and then write their contents by writing to this stream using write().
- *
- * <p>tar archives consist of a sequence of records of 512 bytes each
- * that are grouped into blocks. Prior to Apache Commons Compress 1.14
- * it has been possible to configure a record size different from 512
- * bytes and arbitrary block sizes. Starting with Compress 1.15 512 is
- * the only valid option for the record size and the block size must
- * be a multiple of 512. Also the default block size changed from
- * 10240 bytes prior to Compress 1.15 to 512 bytes with Compress
- * 1.15.
- *
- * @NotThreadSafe
- */
-public class TarArchiveOutputStream extends ArchiveOutputStream {
-
- /**
- * Fail if a long file name is required in the archive.
- */
- public static final int LONGFILE_ERROR = 0;
-
- /**
- * Long paths will be truncated in the archive.
- */
- public static final int LONGFILE_TRUNCATE = 1;
-
- /**
- * GNU tar extensions are used to store long file names in the archive.
- */
- public static final int LONGFILE_GNU = 2;
-
- /**
- * POSIX/PAX extensions are used to store long file names in the archive.
- */
- public static final int LONGFILE_POSIX = 3;
-
- /**
- * Fail if a big number (e.g. size > 8GiB) is required in the archive.
- */
- public static final int BIGNUMBER_ERROR = 0;
-
- /**
- * star/GNU tar/BSD tar extensions are used to store big number in the archive.
- */
- public static final int BIGNUMBER_STAR = 1;
-
- /**
- * POSIX/PAX extensions are used to store big numbers in the archive.
- */
- public static final int BIGNUMBER_POSIX = 2;
- private static final int RECORD_SIZE = 512;
-
- private long currSize;
- private String currName;
- private long currBytes;
- private final byte[] recordBuf;
- private int longFileMode = LONGFILE_ERROR;
- private int bigNumberMode = BIGNUMBER_ERROR;
- private int recordsWritten;
- private final int recordsPerBlock;
-
- private boolean closed = false;
-
- /**
- * Indicates if putArchiveEntry has been called without closeArchiveEntry
- */
- private boolean haveUnclosedEntry = false;
-
- /**
- * indicates if this archive is finished
- */
- private boolean finished = false;
-
- private final FixedLengthBlockOutputStream out;
- private final CountingOutputStream countingOut;
-
- private final ZipEncoding zipEncoding;
-
- // the provided encoding (for unit tests)
- final String encoding;
-
- private boolean addPaxHeadersForNonAsciiNames = false;
- private static final ZipEncoding ASCII =
- ZipEncodingHelper.getZipEncoding("ASCII");
-
- private static final int BLOCK_SIZE_UNSPECIFIED = -511;
-
- /**
- * Constructor for TarArchiveOutputStream.
- *
- * <p>Uses a block size of 512 bytes.
- *
- * @param os the output stream to use
- */
- public TarArchiveOutputStream(final OutputStream os) {
- this(os, BLOCK_SIZE_UNSPECIFIED);
- }
-
- /**
- * Constructor for TarArchiveOutputStream.
- *
- * <p>Uses a block size of 512 bytes.
- *
- * @param os the output stream to use
- * @param encoding name of the encoding to use for file names
- * @since 1.4
- */
- public TarArchiveOutputStream(final OutputStream os, final String encoding) {
- this(os, BLOCK_SIZE_UNSPECIFIED, encoding);
- }
-
- /**
- * Constructor for TarArchiveOutputStream.
- *
- * @param os the output stream to use
- * @param blockSize the block size to use. Must be a multiple of 512 bytes.
- */
- public TarArchiveOutputStream(final OutputStream os, final int blockSize) {
- this(os, blockSize, null);
- }
-
-
- /**
- * Constructor for TarArchiveOutputStream.
- *
- * @param os the output stream to use
- * @param blockSize the block size to use
- * @param recordSize the record size to use. Must be 512 bytes.
- * @deprecated recordSize must always be 512 bytes. An IllegalArgumentException will be thrown
- * if any other value is used
- */
- @Deprecated
- public TarArchiveOutputStream(final OutputStream os, final int blockSize,
- final int recordSize) {
- this(os, blockSize, recordSize, null);
- }
-
- /**
- * Constructor for TarArchiveOutputStream.
- *
- * @param os the output stream to use
- * @param blockSize the block size to use. Must be a multiple of 512 bytes.
- * @param recordSize the record size to use. Must be 512 bytes.
- * @param encoding name of the encoding to use for file names
- * @since 1.4
- * @deprecated recordSize must always be 512 bytes. An IllegalArgumentException will be thrown
- * if any other value is used.
- */
- @Deprecated
- public TarArchiveOutputStream(final OutputStream os, final int blockSize,
- final int recordSize, final String encoding) {
- this(os, blockSize, encoding);
- if (recordSize != RECORD_SIZE) {
- throw new IllegalArgumentException(
- "Tar record size must always be 512 bytes. Attempt to set size of " + recordSize);
- }
-
- }
-
- /**
- * Constructor for TarArchiveOutputStream.
- *
- * @param os the output stream to use
- * @param blockSize the block size to use. Must be a multiple of 512 bytes.
- * @param encoding name of the encoding to use for file names
- * @since 1.4
- */
- public TarArchiveOutputStream(final OutputStream os, final int blockSize,
- final String encoding) {
- int realBlockSize;
- if (BLOCK_SIZE_UNSPECIFIED == blockSize) {
- realBlockSize = RECORD_SIZE;
- } else {
- realBlockSize = blockSize;
- }
-
- if (realBlockSize <=0 || realBlockSize % RECORD_SIZE != 0) {
- throw new IllegalArgumentException("Block size must be a multiple of 512 bytes. Attempt to use set size of " + blockSize);
- }
- out = new FixedLengthBlockOutputStream(countingOut = new CountingOutputStream(os),
- RECORD_SIZE);
- this.encoding = encoding;
- this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
-
- this.recordBuf = new byte[RECORD_SIZE];
- this.recordsPerBlock = realBlockSize / RECORD_SIZE;
- }
-
- /**
- * Set the long file mode. This can be LONGFILE_ERROR(0), LONGFILE_TRUNCATE(1),
- * LONGFILE_GNU(2) or LONGFILE_POSIX(3). This specifies the treatment of long file names
- * (names >= TarConstants.NAMELEN). Default is LONGFILE_ERROR.
- *
- * @param longFileMode the mode to use
- */
- public void setLongFileMode(final int longFileMode) {
- this.longFileMode = longFileMode;
- }
-
- /**
- * Set the big number mode. This can be BIGNUMBER_ERROR(0), BIGNUMBER_STAR(1) or
- * BIGNUMBER_POSIX(2). This specifies the treatment of big files (sizes >
- * TarConstants.MAXSIZE) and other numeric values too big to fit into a traditional tar header.
- * Default is BIGNUMBER_ERROR.
- *
- * @param bigNumberMode the mode to use
- * @since 1.4
- */
- public void setBigNumberMode(final int bigNumberMode) {
- this.bigNumberMode = bigNumberMode;
- }
-
- /**
- * Whether to add a PAX extension header for non-ASCII file names.
- *
- * @param b whether to add a PAX extension header for non-ASCII file names.
- * @since 1.4
- */
- public void setAddPaxHeadersForNonAsciiNames(final boolean b) {
- addPaxHeadersForNonAsciiNames = b;
- }
-
- @Deprecated
- @Override
- public int getCount() {
- return (int) getBytesWritten();
- }
-
- @Override
- public long getBytesWritten() {
- return countingOut.getBytesWritten();
- }
-
- /**
- * Ends the TAR archive without closing the underlying OutputStream.
- *
- * An archive consists of a series of file entries terminated by an
- * end-of-archive entry, which consists of two 512-byte blocks of zero bytes.
- * POSIX.1 requires two EOF records, as do some other implementations.
- *
- * @throws IOException on error
- */
- @Override
- public void finish() throws IOException {
- if (finished) {
- throw new IOException("This archive has already been finished");
- }
-
- if (haveUnclosedEntry) {
- throw new IOException("This archive contains unclosed entries.");
- }
- writeEOFRecord();
- writeEOFRecord();
- padAsNeeded();
- out.flush();
- finished = true;
- }
-
- /**
- * Closes the underlying OutputStream.
- *
- * @throws IOException on error
- */
- @Override
- public void close() throws IOException {
- if (!finished) {
- finish();
- }
-
- if (!closed) {
- out.close();
- closed = true;
- }
- }
-
- /**
- * Get the record size being used by this stream's TarBuffer.
- *
- * @return The TarBuffer record size.
- * @deprecated the record size is always 512 bytes and can no longer be configured
- */
- @Deprecated
- public int getRecordSize() {
- return RECORD_SIZE;
- }
-
- /**
- * Put an entry on the output stream. This writes the entry's header record and positions the
- * output stream for writing the contents of the entry. Once this method is called, the stream
- * is ready for calls to write() to write the entry's contents. Once the contents are written,
- * closeArchiveEntry() MUST be called to ensure that all buffered data is completely
- * written to the output stream.
- *
- * @param archiveEntry The TarEntry to be written to the archive.
- * @throws IOException on error
- * @throws ClassCastException if archiveEntry is not an instance of TarArchiveEntry
- */
- @Override
- public void putArchiveEntry(final ArchiveEntry archiveEntry) throws IOException {
- if (finished) {
- throw new IOException("Stream has already been finished");
- }
- final TarArchiveEntry entry = (TarArchiveEntry) archiveEntry;
- if (entry.isGlobalPaxHeader()) {
- final byte[] data = encodeExtendedPaxHeadersContents(entry.getExtraPaxHeaders());
- entry.setSize(data.length);
- entry.writeEntryHeader(recordBuf, zipEncoding, bigNumberMode == BIGNUMBER_STAR);
- writeRecord(recordBuf);
- currSize = entry.getSize();
- currBytes = 0;
- this.haveUnclosedEntry = true;
- write(data);
- closeArchiveEntry();
- } else {
- final Map<String, String> paxHeaders = new HashMap<>();
- final String entryName = entry.getName();
- final boolean paxHeaderContainsPath = handleLongName(entry, entryName, paxHeaders, "path",
- TarConstants.LF_GNUTYPE_LONGNAME, "file name");
-
- final String linkName = entry.getLinkName();
- final boolean paxHeaderContainsLinkPath = linkName != null && linkName.length() > 0
- && handleLongName(entry, linkName, paxHeaders, "linkpath",
- TarConstants.LF_GNUTYPE_LONGLINK, "link name");
-
- if (bigNumberMode == BIGNUMBER_POSIX) {
- addPaxHeadersForBigNumbers(paxHeaders, entry);
- } else if (bigNumberMode != BIGNUMBER_STAR) {
- failForBigNumbers(entry);
- }
-
- if (addPaxHeadersForNonAsciiNames && !paxHeaderContainsPath
- && !ASCII.canEncode(entryName)) {
- paxHeaders.put("path", entryName);
- }
-
- if (addPaxHeadersForNonAsciiNames && !paxHeaderContainsLinkPath
- && (entry.isLink() || entry.isSymbolicLink())
- && !ASCII.canEncode(linkName)) {
- paxHeaders.put("linkpath", linkName);
- }
- paxHeaders.putAll(entry.getExtraPaxHeaders());
-
- if (paxHeaders.size() > 0) {
- writePaxHeaders(entry, entryName, paxHeaders);
- }
-
- entry.writeEntryHeader(recordBuf, zipEncoding, bigNumberMode == BIGNUMBER_STAR);
- writeRecord(recordBuf);
-
- currBytes = 0;
-
- if (entry.isDirectory()) {
- currSize = 0;
- } else {
- currSize = entry.getSize();
- }
- currName = entryName;
- haveUnclosedEntry = true;
- }
- }
-
- /**
- * Close an entry. This method MUST be called for all file entries that contain data. The reason
- * is that we must buffer data written to the stream in order to satisfy the buffer's record
- * based writes. Thus, there may be data fragments still being assembled that must be written to
- * the output stream before this entry is closed and the next entry written.
- *
- * @throws IOException on error
- */
- @Override
- public void closeArchiveEntry() throws IOException {
- if (finished) {
- throw new IOException("Stream has already been finished");
- }
- if (!haveUnclosedEntry) {
- throw new IOException("No current entry to close");
- }
- out.flushBlock();
- if (currBytes < currSize) {
- throw new IOException("entry '" + currName + "' closed at '"
- + currBytes
- + "' before the '" + currSize
- + "' bytes specified in the header were written");
- }
- recordsWritten += (currSize / RECORD_SIZE);
- if (0 != currSize % RECORD_SIZE) {
- recordsWritten++;
- }
- haveUnclosedEntry = false;
- }
-
- /**
- * Writes bytes to the current tar archive entry. This method is aware of the current entry and
- * will throw an exception if you attempt to write bytes past the length specified for the
- * current entry.
- *
- * @param wBuf The buffer to write to the archive.
- * @param wOffset The offset in the buffer from which to get bytes.
- * @param numToWrite The number of bytes to write.
- * @throws IOException on error
- */
- @Override
- public void write(final byte[] wBuf, int wOffset, int numToWrite) throws IOException {
- if (!haveUnclosedEntry) {
- throw new IllegalStateException("No current tar entry");
- }
- if (currBytes + numToWrite > currSize) {
- throw new IOException("request to write '" + numToWrite
- + "' bytes exceeds size in header of '"
- + currSize + "' bytes for entry '"
- + currName + "'");
- }
- out.write(wBuf, wOffset, numToWrite);
- currBytes += numToWrite;
- }
-
- /**
- * Writes a PAX extended header with the given map as contents.
- *
- * @since 1.4
- */
- void writePaxHeaders(final TarArchiveEntry entry,
- final String entryName,
- final Map<String, String> headers) throws IOException {
- String name = "./PaxHeaders.X/" + stripTo7Bits(entryName);
- if (name.length() >= TarConstants.NAMELEN) {
- name = name.substring(0, TarConstants.NAMELEN - 1);
- }
- final TarArchiveEntry pex = new TarArchiveEntry(name,
- TarConstants.LF_PAX_EXTENDED_HEADER_LC);
- transferModTime(entry, pex);
-
- final byte[] data = encodeExtendedPaxHeadersContents(headers);
- pex.setSize(data.length);
- putArchiveEntry(pex);
- write(data);
- closeArchiveEntry();
- }
-
- private byte[] encodeExtendedPaxHeadersContents(Map<String, String> headers)
- throws UnsupportedEncodingException {
- final StringWriter w = new StringWriter();
- for (final Map.Entry<String, String> h : headers.entrySet()) {
- final String key = h.getKey();
- final String value = h.getValue();
- int len = key.length() + value.length()
- + 3 /* blank, equals and newline */
- + 2 /* guess 9 < actual length < 100 */;
- String line = len + " " + key + "=" + value + "\n";
- int actualLength = line.getBytes(CharsetNames.UTF_8).length;
- while (len != actualLength) {
- // Adjust for cases where length < 10 or > 100
- // or where UTF-8 encoding isn't a single octet
- // per character.
- // Must be in loop as size may go from 99 to 100 in
- // first pass so we'd need a second.
- len = actualLength;
- line = len + " " + key + "=" + value + "\n";
- actualLength = line.getBytes(CharsetNames.UTF_8).length;
- }
- w.write(line);
- }
- return w.toString().getBytes(CharsetNames.UTF_8);
- }
-
- private String stripTo7Bits(final String name) {
- final int length = name.length();
- final StringBuilder result = new StringBuilder(length);
- for (int i = 0; i < length; i++) {
- final char stripped = (char) (name.charAt(i) & 0x7F);
- if (shouldBeReplaced(stripped)) {
- result.append("_");
- } else {
- result.append(stripped);
- }
- }
- return result.toString();
- }
-
- /**
- * @return true if the character could lead to problems when used inside a TarArchiveEntry name
- * for a PAX header.
- */
- private boolean shouldBeReplaced(final char c) {
- return c == 0 // would be read as Trailing null
- || c == '/' // when used as last character TAE will consider the PAX header a directory
- || c == '\\'; // same as '/' as slashes get "normalized" on Windows
- }
-
- /**
- * Write an EOF (end of archive) record to the tar archive. An EOF record consists of a record
- * of all zeros.
- */
- private void writeEOFRecord() throws IOException {
- Arrays.fill(recordBuf, (byte) 0);
- writeRecord(recordBuf);
- }
-
- @Override
- public void flush() throws IOException {
- out.flush();
- }
-
- @Override
- public ArchiveEntry createArchiveEntry(final File inputFile, final String entryName)
- throws IOException {
- if (finished) {
- throw new IOException("Stream has already been finished");
- }
- return new TarArchiveEntry(inputFile, entryName);
- }
-
- /**
- * Write an archive record to the archive.
- *
- * @param record The record data to write to the archive.
- * @throws IOException on error
- */
- private void writeRecord(final byte[] record) throws IOException {
- if (record.length != RECORD_SIZE) {
- throw new IOException("record to write has length '"
- + record.length
- + "' which is not the record size of '"
- + RECORD_SIZE + "'");
- }
-
- out.write(record);
- recordsWritten++;
- }
-
- private void padAsNeeded() throws IOException {
- final int start = recordsWritten % recordsPerBlock;
- if (start != 0) {
- for (int i = start; i < recordsPerBlock; i++) {
- writeEOFRecord();
- }
- }
- }
-
- private void addPaxHeadersForBigNumbers(final Map<String, String> paxHeaders,
- final TarArchiveEntry entry) {
- addPaxHeaderForBigNumber(paxHeaders, "size", entry.getSize(),
- TarConstants.MAXSIZE);
- addPaxHeaderForBigNumber(paxHeaders, "gid", entry.getLongGroupId(),
- TarConstants.MAXID);
- addPaxHeaderForBigNumber(paxHeaders, "mtime",
- entry.getModTime().getTime() / 1000,
- TarConstants.MAXSIZE);
- addPaxHeaderForBigNumber(paxHeaders, "uid", entry.getLongUserId(),
- TarConstants.MAXID);
- // star extensions by J\u00f6rg Schilling
- addPaxHeaderForBigNumber(paxHeaders, "SCHILY.devmajor",
- entry.getDevMajor(), TarConstants.MAXID);
- addPaxHeaderForBigNumber(paxHeaders, "SCHILY.devminor",
- entry.getDevMinor(), TarConstants.MAXID);
- // there is no PAX header for file mode
- failForBigNumber("mode", entry.getMode(), TarConstants.MAXID);
- }
-
- private void addPaxHeaderForBigNumber(final Map<String, String> paxHeaders,
- final String header, final long value,
- final long maxValue) {
- if (value < 0 || value > maxValue) {
- paxHeaders.put(header, String.valueOf(value));
- }
- }
-
- private void failForBigNumbers(final TarArchiveEntry entry) {
- failForBigNumber("entry size", entry.getSize(), TarConstants.MAXSIZE);
- failForBigNumberWithPosixMessage("group id", entry.getLongGroupId(), TarConstants.MAXID);
- failForBigNumber("last modification time",
- entry.getModTime().getTime() / 1000,
- TarConstants.MAXSIZE);
- failForBigNumber("user id", entry.getLongUserId(), TarConstants.MAXID);
- failForBigNumber("mode", entry.getMode(), TarConstants.MAXID);
- failForBigNumber("major device number", entry.getDevMajor(),
- TarConstants.MAXID);
- failForBigNumber("minor device number", entry.getDevMinor(),
- TarConstants.MAXID);
- }
-
- private void failForBigNumber(final String field, final long value, final long maxValue) {
- failForBigNumber(field, value, maxValue, "");
- }
-
- private void failForBigNumberWithPosixMessage(final String field, final long value,
- final long maxValue) {
- failForBigNumber(field, value, maxValue,
- " Use STAR or POSIX extensions to overcome this limit");
- }
-
- private void failForBigNumber(final String field, final long value, final long maxValue,
- final String additionalMsg) {
- if (value < 0 || value > maxValue) {
- throw new RuntimeException(field + " '" + value //NOSONAR
- + "' is too big ( > "
- + maxValue + " )." + additionalMsg);
- }
- }
-
- /**
- * Handles long file or link names according to the longFileMode setting.
- *
- *
- * <p>I.e. if the given name is too long to be written to a plain tar header then
- * <ul>
- * <li>it creates a pax header whose name is given by the paxHeaderName parameter if longFileMode is POSIX</li>
- * <li>it creates a GNU longlink entry whose type is given by the linkType parameter if longFileMode is GNU</li>
- * <li>it throws an exception if longFileMode is ERROR</li>
- * <li>it truncates the name if longFileMode is TRUNCATE</li>
- * </ul>
- *
- * @param entry entry the name belongs to
- * @param name the name to write
- * @param paxHeaders current map of pax headers
- * @param paxHeaderName name of the pax header to write
- * @param linkType type of the GNU entry to write
- * @param fieldName the name of the field
- * @return whether a pax header has been written.
- */
- private boolean handleLongName(final TarArchiveEntry entry, final String name,
- final Map<String, String> paxHeaders,
- final String paxHeaderName, final byte linkType, final String fieldName)
- throws IOException {
- final ByteBuffer encodedName = zipEncoding.encode(name);
- final int len = encodedName.limit() - encodedName.position();
- if (len >= TarConstants.NAMELEN) {
-
- if (longFileMode == LONGFILE_POSIX) {
- paxHeaders.put(paxHeaderName, name);
- return true;
- } else if (longFileMode == LONGFILE_GNU) {
- // create a TarEntry for the LongLink, the contents
- // of which are the link's name
- final TarArchiveEntry longLinkEntry = new TarArchiveEntry(TarConstants.GNU_LONGLINK,
- linkType);
-
- longLinkEntry.setSize(len + 1L); // +1 for NUL
- transferModTime(entry, longLinkEntry);
- putArchiveEntry(longLinkEntry);
- write(encodedName.array(), encodedName.arrayOffset(), len);
- write(0); // NUL terminator
- closeArchiveEntry();
- } else if (longFileMode != LONGFILE_TRUNCATE) {
- throw new RuntimeException(fieldName + " '" + name //NOSONAR
- + "' is too long ( > "
- + TarConstants.NAMELEN + " bytes)");
- }
- }
- return false;
- }
-
- private void transferModTime(final TarArchiveEntry from, final TarArchiveEntry to) {
- Date fromModTime = from.getModTime();
- final long fromModTimeSeconds = fromModTime.getTime() / 1000;
- if (fromModTimeSeconds < 0 || fromModTimeSeconds > TarConstants.MAXSIZE) {
- fromModTime = new Date(0);
- }
- to.setModTime(fromModTime);
- }
-}
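
The PAX extended-header encoding removed above is subtle: each record begins with its own total length in decimal, so the length has to be recomputed until it stops changing (e.g. when it crosses from 99 to 100 bytes, or when UTF-8 expands a character to several octets). A minimal standalone sketch of that fixed-point loop, with a class and method name of our own choosing rather than anything from the library:

```java
import java.nio.charset.StandardCharsets;

// Sketch only: PaxRecordSketch is a hypothetical helper, not part of
// Commons Compress or JOSM.
public final class PaxRecordSketch {
    static String encodeRecord(String key, String value) {
        // Initial guess: payload plus blank, '=', '\n', and a 2-digit length.
        int len = key.length() + value.length() + 3 + 2;
        String line = len + " " + key + "=" + value + "\n";
        int actual = line.getBytes(StandardCharsets.UTF_8).length;
        while (len != actual) { // iterate until the embedded length is exact
            len = actual;
            line = len + " " + key + "=" + value + "\n";
            actual = line.getBytes(StandardCharsets.UTF_8).length;
        }
        return line;
    }

    public static void main(String[] args) {
        System.out.print(encodeRecord("path", "some/very/long/name")); // "28 path=some/very/long/name\n"
    }
}
```

The loop is guaranteed to run at most a couple of iterations, since appending one extra digit can push the length across a power of ten only once per digit.
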
diff --git a/src/org/apache/commons/compress/archivers/tar/TarArchiveSparseEntry.java b/src/org/apache/commons/compress/archivers/tar/TarArchiveSparseEntry.java
deleted file mode 100644
index a49e7180c72..00000000000
--- a/src/org/apache/commons/compress/archivers/tar/TarArchiveSparseEntry.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.tar;
-
-import java.io.IOException;
-
-/**
- * This class represents a sparse entry in a Tar archive.
- *
- *
-
-public class TarArchiveSparseEntry implements TarConstants {
- /** If an extension sparse header follows. */
- private final boolean isExtended;
-
- /**
- * Construct an entry from an archive's header bytes. File is set
- * to null.
- *
- * @param headerBuf The header bytes from a tar archive entry.
- * @throws IOException on unknown format
- */
- public TarArchiveSparseEntry(final byte[] headerBuf) throws IOException {
- int offset = 0;
- offset += SPARSELEN_GNU_SPARSE;
- isExtended = TarUtils.parseBoolean(headerBuf, offset);
- }
-
- public boolean isExtended() {
- return isExtended;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/tar/TarConstants.java b/src/org/apache/commons/compress/archivers/tar/TarConstants.java
deleted file mode 100644
index 751840d448f..00000000000
--- a/src/org/apache/commons/compress/archivers/tar/TarConstants.java
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.tar;
-
-/**
- * This interface contains all the definitions used in the package.
- *
- * For tar formats (FORMAT_OLDGNU, FORMAT_POSIX, etc.) see GNU tar
- * tar.h type enum archive_format
- */
-// CheckStyle:InterfaceIsTypeCheck OFF (bc)
-public interface TarConstants {
-
- /** Default record size */
- int DEFAULT_RCDSIZE = 512;
-
- /** Default block size */
- int DEFAULT_BLKSIZE = DEFAULT_RCDSIZE * 20;
-
- /**
- * GNU format as per before tar 1.12.
- */
- int FORMAT_OLDGNU = 2;
-
- /**
- * Pure Posix format.
- */
- int FORMAT_POSIX = 3;
-
- /**
- * xstar format used by Jörg Schilling's star.
- */
- int FORMAT_XSTAR = 4;
-
- /**
- * The length of the name field in a header buffer.
- */
- int NAMELEN = 100;
-
- /**
- * The length of the mode field in a header buffer.
- */
- int MODELEN = 8;
-
- /**
- * The length of the user id field in a header buffer.
- */
- int UIDLEN = 8;
-
- /**
- * The length of the group id field in a header buffer.
- */
- int GIDLEN = 8;
-
- /**
- * The maximum value of gid/uid in a tar archive which can
- * be expressed in octal char notation (that's 7 sevens, octal).
- */
- long MAXID = 07777777L;
-
- /**
- * The length of the checksum field in a header buffer.
- */
- int CHKSUMLEN = 8;
-
- /**
- * Offset of the checksum field within header record.
- * @since 1.5
- */
- int CHKSUM_OFFSET = 148;
-
- /**
- * The length of the size field in a header buffer.
- * Includes the trailing space or NUL.
- */
- int SIZELEN = 12;
-
- /**
- * The maximum size of a file in a tar archive
- * which can be expressed in octal char notation (that's 11 sevens, octal).
- */
- long MAXSIZE = 077777777777L;
-
- /** Offset of start of magic field within header record */
- int MAGIC_OFFSET = 257;
- /**
- * The length of the magic field in a header buffer.
- */
- int MAGICLEN = 6;
-
- /** Offset of start of version field within header record */
- int VERSION_OFFSET = 263;
- /**
- * Previously this was regarded as part of the "magic" field, but it is separate.
- */
- int VERSIONLEN = 2;
-
- /**
- * The length of the modification time field in a header buffer.
- */
- int MODTIMELEN = 12;
-
- /**
- * The length of the user name field in a header buffer.
- */
- int UNAMELEN = 32;
-
- /**
- * The length of the group name field in a header buffer.
- */
- int GNAMELEN = 32;
-
- /**
- * The length of each of the device fields (major and minor) in a header buffer.
- */
- int DEVLEN = 8;
-
- /**
- * Length of the prefix field.
- *
- */
- int PREFIXLEN = 155;
-
- /**
- * The length of the access time field in an old GNU header buffer.
- *
- */
- int ATIMELEN_GNU = 12;
-
- /**
- * The length of the created time field in an old GNU header buffer.
- *
- */
- int CTIMELEN_GNU = 12;
-
- /**
- * The length of the multivolume start offset field in an old GNU header buffer.
- *
- */
- int OFFSETLEN_GNU = 12;
-
- /**
- * The length of the long names field in an old GNU header buffer.
- *
- */
- int LONGNAMESLEN_GNU = 4;
-
- /**
- * The length of the padding field in an old GNU header buffer.
- *
- */
- int PAD2LEN_GNU = 1;
-
- /**
- * The sum of the length of all sparse headers in an old GNU header buffer.
- *
- */
- int SPARSELEN_GNU = 96;
-
- /**
- * The length of the is extension field in an old GNU header buffer.
- *
- */
- int ISEXTENDEDLEN_GNU = 1;
-
- /**
- * The length of the real size field in an old GNU header buffer.
- *
- */
- int REALSIZELEN_GNU = 12;
-
- /**
- * The sum of the length of all sparse headers in a sparse header buffer.
- *
- */
- int SPARSELEN_GNU_SPARSE = 504;
-
- /**
- * The length of the is extension field in a sparse header buffer.
- *
- */
- int ISEXTENDEDLEN_GNU_SPARSE = 1;
-
- /**
- * LF_ constants represent the "link flag" of an entry, or more commonly,
- * the "entry type". This is the "old way" of indicating a normal file.
- */
- byte LF_OLDNORM = 0;
-
- /**
- * Normal file type.
- */
- byte LF_NORMAL = (byte) '0';
-
- /**
- * Link file type.
- */
- byte LF_LINK = (byte) '1';
-
- /**
- * Symbolic link file type.
- */
- byte LF_SYMLINK = (byte) '2';
-
- /**
- * Character device file type.
- */
- byte LF_CHR = (byte) '3';
-
- /**
- * Block device file type.
- */
- byte LF_BLK = (byte) '4';
-
- /**
- * Directory file type.
- */
- byte LF_DIR = (byte) '5';
-
- /**
- * FIFO (pipe) file type.
- */
- byte LF_FIFO = (byte) '6';
-
- /**
- * Contiguous file type.
- */
- byte LF_CONTIG = (byte) '7';
-
- /**
- * Identifies the *next* file on the tape as having a long linkname.
- */
- byte LF_GNUTYPE_LONGLINK = (byte) 'K';
-
- /**
- * Identifies the *next* file on the tape as having a long name.
- */
- byte LF_GNUTYPE_LONGNAME = (byte) 'L';
-
- /**
- * Sparse file type.
- * @since 1.1.1
- */
- byte LF_GNUTYPE_SPARSE = (byte) 'S';
-
- // See "http://www.opengroup.org/onlinepubs/009695399/utilities/pax.html#tag_04_100_13_02"
-
- /**
- * Identifies the entry as a Pax extended header.
- * @since 1.1
- */
- byte LF_PAX_EXTENDED_HEADER_LC = (byte) 'x';
-
- /**
- * Identifies the entry as a Pax extended header (SunOS tar -E).
- *
- * @since 1.1
- */
- byte LF_PAX_EXTENDED_HEADER_UC = (byte) 'X';
-
- /**
- * Identifies the entry as a Pax global extended header.
- *
- * @since 1.1
- */
- byte LF_PAX_GLOBAL_EXTENDED_HEADER = (byte) 'g';
-
- /**
- * The magic tag representing a POSIX tar archive.
- */
- String MAGIC_POSIX = "ustar\0";
- String VERSION_POSIX = "00";
-
- /**
- * The magic tag representing a GNU tar archive.
- */
- String MAGIC_GNU = "ustar ";
- // Appear to be two possible GNU versions
- String VERSION_GNU_SPACE = " \0";
- String VERSION_GNU_ZERO = "0\0";
-
- /**
- * The magic tag representing an Ant tar archive.
- *
- * @since 1.1
- */
- String MAGIC_ANT = "ustar\0";
-
- /**
- * The "version" representing an Ant tar archive.
- *
- * @since 1.1
- */
- // Does not appear to have a version, however Ant does write 8 bytes,
- // so assume the version is 2 nulls
- String VERSION_ANT = "\0\0";
-
- /**
- * The name of the GNU tar entry which contains a long name.
- */
- String GNU_LONGLINK = "././@LongLink"; // TODO rename as LONGLINK_GNU ?
-
- /**
- * The magic string used in the last four bytes of the header to
- * identify the xstar format.
- * @since 1.11
- */
- String MAGIC_XSTAR = "tar\0";
-
- /**
- * Offset inside the header for the xstar magic bytes.
- * @since 1.11
- */
- int XSTAR_MAGIC_OFFSET = 508;
-
- /**
- * Length of the XSTAR magic.
- * @since 1.11
- */
- int XSTAR_MAGIC_LEN = 4;
-
- /**
- * Length of the prefix field in xstar archives.
- *
- * @since 1.11
- */
- int PREFIXLEN_XSTAR = 131;
-
- /**
- * The length of the access time field in a xstar header buffer.
- *
- * @since 1.11
- */
- int ATIMELEN_XSTAR = 12;
-
- /**
- * The length of the created time field in a xstar header buffer.
- *
- * @since 1.11
- */
- int CTIMELEN_XSTAR = 12;
-}
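
These two limits are what the LONGFILE_* and BIGNUMBER_* handling in TarArchiveOutputStream works around: a ustar uid/gid field holds 7 octal digits (MAXID) and a size field 11 (MAXSIZE). A quick self-contained check of the decimal values behind the octal literals (class name is illustrative only):

```java
// Sketch: prints the decimal values behind MAXID and MAXSIZE above.
public final class TarLimitsSketch {
    public static void main(String[] args) {
        long maxId = 07777777L;       // 7 octal digits -> 2097151
        long maxSize = 077777777777L; // 11 octal digits -> 8589934591 (~8 GiB)
        System.out.println(maxId + " = 0" + Long.toOctalString(maxId));
        System.out.println(maxSize + " = 0" + Long.toOctalString(maxSize));
    }
}
```

Anything larger than these values needs the STAR/POSIX extensions or the base-256 binary encoding handled by TarUtils.formatLongOctalOrBinaryBytes.
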
diff --git a/src/org/apache/commons/compress/archivers/tar/TarUtils.java b/src/org/apache/commons/compress/archivers/tar/TarUtils.java
deleted file mode 100644
index c83bcf966ed..00000000000
--- a/src/org/apache/commons/compress/archivers/tar/TarUtils.java
+++ /dev/null
@@ -1,614 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.tar;
-
-import static org.apache.commons.compress.archivers.tar.TarConstants.CHKSUMLEN;
-import static org.apache.commons.compress.archivers.tar.TarConstants.CHKSUM_OFFSET;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.nio.ByteBuffer;
-import org.apache.commons.compress.archivers.zip.ZipEncoding;
-import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
-
-/**
- * This class provides static utility methods to work with byte streams.
- *
- * @Immutable
- */
-// CheckStyle:HideUtilityClassConstructorCheck OFF (bc)
-public class TarUtils {
-
- private static final int BYTE_MASK = 255;
-
- static final ZipEncoding DEFAULT_ENCODING =
- ZipEncodingHelper.getZipEncoding(null);
-
- /**
- * Encapsulates the algorithms used up to Commons Compress 1.3 as
- * ZipEncoding.
- */
- static final ZipEncoding FALLBACK_ENCODING = new ZipEncoding() {
- @Override
- public boolean canEncode(final String name) { return true; }
-
- @Override
- public ByteBuffer encode(final String name) {
- final int length = name.length();
- final byte[] buf = new byte[length];
-
- // copy until end of input or output is reached.
- for (int i = 0; i < length; ++i) {
- buf[i] = (byte) name.charAt(i);
- }
- return ByteBuffer.wrap(buf);
- }
-
- @Override
- public String decode(final byte[] buffer) {
- final int length = buffer.length;
- final StringBuilder result = new StringBuilder(length);
-
- for (final byte b : buffer) {
- if (b == 0) { // Trailing null
- break;
- }
- result.append((char) (b & 0xFF)); // Allow for sign-extension
- }
-
- return result.toString();
- }
- };
-
- /** Private constructor to prevent instantiation of this utility class. */
- private TarUtils(){
- }
-
- /**
- * Parse an octal string from a buffer.
- *
- *
- * <p>Leading spaces are ignored.
- * The buffer must contain a trailing space or NUL,
- * and may contain an additional trailing space or NUL.</p>
- *
- * <p>The input buffer is allowed to contain all NULs,
- * in which case the method returns 0L
- * (this allows for missing fields).</p>
- *
- * <p>To work-around some tar implementations that insert a
- * leading NUL this method returns 0 if it detects a leading NUL
- * since Commons Compress 1.4.</p>
- *
- * @param buffer The buffer from which to parse.
- * @param offset The offset into the buffer from which to parse.
- * @param length The maximum number of bytes to parse - must be at least 2 bytes.
- * @return The long value of the octal string.
- * @throws IllegalArgumentException if the trailing space/NUL is missing or if an invalid byte is detected.
- */
- public static long parseOctal(final byte[] buffer, final int offset, final int length) {
- long result = 0;
- int end = offset + length;
- int start = offset;
-
- if (length < 2){
- throw new IllegalArgumentException("Length "+length+" must be at least 2");
- }
-
- if (buffer[start] == 0) {
- return 0L;
- }
-
- // Skip leading spaces
- while (start < end){
- if (buffer[start] == ' '){
- start++;
- } else {
- break;
- }
- }
-
- // Trim all trailing NULs and spaces.
- // The ustar and POSIX tar specs require a trailing NUL or
- // space but some implementations use the extra digit for big
- // sizes/uids/gids ...
- byte trailer = buffer[end - 1];
- while (start < end && (trailer == 0 || trailer == ' ')) {
- end--;
- trailer = buffer[end - 1];
- }
-
- for ( ;start < end; start++) {
- final byte currentByte = buffer[start];
- // CheckStyle:MagicNumber OFF
- if (currentByte < '0' || currentByte > '7'){
- throw new IllegalArgumentException(
- exceptionMessage(buffer, offset, length, start, currentByte));
- }
- result = (result << 3) + (currentByte - '0'); // convert from ASCII
- // CheckStyle:MagicNumber ON
- }
-
- return result;
- }
-
- /**
- * Compute the value contained in a byte buffer. If the most
- * significant bit of the first byte in the buffer is set, this
- * bit is ignored and the rest of the buffer is interpreted as a
- * binary number. Otherwise, the buffer is interpreted as an
- * octal number as per the parseOctal function above.
- *
- * @param buffer The buffer from which to parse.
- * @param offset The offset into the buffer from which to parse.
- * @param length The maximum number of bytes to parse.
- * @return The long value of the octal or binary string.
- * @throws IllegalArgumentException if the trailing space/NUL is
- * missing or an invalid byte is detected in an octal number, or
- * if a binary number would exceed the size of a signed long
- * 64-bit integer.
- * @since 1.4
- */
- public static long parseOctalOrBinary(final byte[] buffer, final int offset,
- final int length) {
-
- if ((buffer[offset] & 0x80) == 0) {
- return parseOctal(buffer, offset, length);
- }
- final boolean negative = buffer[offset] == (byte) 0xff;
- if (length < 9) {
- return parseBinaryLong(buffer, offset, length, negative);
- }
- return parseBinaryBigInteger(buffer, offset, length, negative);
- }
-
- private static long parseBinaryLong(final byte[] buffer, final int offset,
- final int length,
- final boolean negative) {
- if (length >= 9) {
- throw new IllegalArgumentException("At offset " + offset + ", "
- + length + " byte binary number"
- + " exceeds maximum signed long"
- + " value");
- }
- long val = 0;
- for (int i = 1; i < length; i++) {
- val = (val << 8) + (buffer[offset + i] & 0xff);
- }
- if (negative) {
- // 2's complement
- val--;
- val ^= (long) Math.pow(2.0, (length - 1) * 8.0) - 1;
- }
- return negative ? -val : val;
- }
-
- private static long parseBinaryBigInteger(final byte[] buffer,
- final int offset,
- final int length,
- final boolean negative) {
- final byte[] remainder = new byte[length - 1];
- System.arraycopy(buffer, offset + 1, remainder, 0, length - 1);
- BigInteger val = new BigInteger(remainder);
- if (negative) {
- // 2's complement
- val = val.add(BigInteger.valueOf(-1)).not();
- }
- if (val.bitLength() > 63) {
- throw new IllegalArgumentException("At offset " + offset + ", "
- + length + " byte binary number"
- + " exceeds maximum signed long"
- + " value");
- }
- return negative ? -val.longValue() : val.longValue();
- }
-
- /**
- * Parse a boolean byte from a buffer.
- * Leading spaces and NUL are ignored.
- * The buffer may contain trailing spaces or NULs.
- *
- * @param buffer The buffer from which to parse.
- * @param offset The offset into the buffer from which to parse.
- * @return The boolean value of the bytes.
- * @throws IllegalArgumentException if an invalid byte is detected.
- */
- public static boolean parseBoolean(final byte[] buffer, final int offset) {
- return buffer[offset] == 1;
- }
-
- // Helper method to generate the exception message
- private static String exceptionMessage(final byte[] buffer, final int offset,
- final int length, final int current, final byte currentByte) {
- // default charset is good enough for an exception message,
- //
- // the alternative was to modify parseOctal and
- // parseOctalOrBinary to receive the ZipEncoding of the
- // archive (deprecating the existing public methods, of
- // course) and dealing with the fact that ZipEncoding#decode
- // can throw an IOException which parseOctal* doesn't declare
- String string = new String(buffer, offset, length);
-
- string=string.replaceAll("\0", "{NUL}"); // Replace NULs to allow string to be printed
- return "Invalid byte "+currentByte+" at offset "+(current-offset)+" in '"+string+"' len="+length;
- }
-
- /**
- * Parse an entry name from a buffer.
- * Parsing stops when a NUL is found
- * or the buffer length is reached.
- *
- * @param buffer The buffer from which to parse.
- * @param offset The offset into the buffer from which to parse.
- * @param length The maximum number of bytes to parse.
- * @return The entry name.
- */
- public static String parseName(final byte[] buffer, final int offset, final int length) {
- try {
- return parseName(buffer, offset, length, DEFAULT_ENCODING);
- } catch (final IOException ex) {
- try {
- return parseName(buffer, offset, length, FALLBACK_ENCODING);
- } catch (final IOException ex2) {
- // impossible
- throw new RuntimeException(ex2); //NOSONAR
- }
- }
- }
-
- /**
- * Parse an entry name from a buffer.
- * Parsing stops when a NUL is found
- * or the buffer length is reached.
- *
- * @param buffer The buffer from which to parse.
- * @param offset The offset into the buffer from which to parse.
- * @param length The maximum number of bytes to parse.
- * @param encoding name of the encoding to use for file names
- * @since 1.4
- * @return The entry name.
- * @throws IOException on error
- */
- public static String parseName(final byte[] buffer, final int offset,
- final int length,
- final ZipEncoding encoding)
- throws IOException {
-
- int len = 0;
- for (int i = offset; len < length && buffer[i] != 0; i++) {
- len++;
- }
- if (len > 0) {
- final byte[] b = new byte[len];
- System.arraycopy(buffer, offset, b, 0, len);
- return encoding.decode(b);
- }
- return "";
- }
-
- /**
- * Copy a name into a buffer.
- * Copies characters from the name into the buffer
- * starting at the specified offset.
- * If the buffer is longer than the name, the buffer
- * is filled with trailing NULs.
- * If the name is longer than the buffer,
- * the output is truncated.
- *
- * @param name The header name from which to copy the characters.
- * @param buf The buffer where the name is to be stored.
- * @param offset The starting offset into the buffer
- * @param length The maximum number of header bytes to copy.
- * @return The updated offset, i.e. offset + length
- */
- public static int formatNameBytes(final String name, final byte[] buf, final int offset, final int length) {
- try {
- return formatNameBytes(name, buf, offset, length, DEFAULT_ENCODING);
- } catch (final IOException ex) {
- try {
- return formatNameBytes(name, buf, offset, length,
- FALLBACK_ENCODING);
- } catch (final IOException ex2) {
- // impossible
- throw new RuntimeException(ex2); //NOSONAR
- }
- }
- }
-
- /**
- * Copy a name into a buffer.
- * Copies characters from the name into the buffer
- * starting at the specified offset.
- * If the buffer is longer than the name, the buffer
- * is filled with trailing NULs.
- * If the name is longer than the buffer,
- * the output is truncated.
- *
- * @param name The header name from which to copy the characters.
- * @param buf The buffer where the name is to be stored.
- * @param offset The starting offset into the buffer
- * @param length The maximum number of header bytes to copy.
- * @param encoding name of the encoding to use for file names
- * @since 1.4
- * @return The updated offset, i.e. offset + length
- * @throws IOException on error
- */
- public static int formatNameBytes(final String name, final byte[] buf, final int offset,
- final int length,
- final ZipEncoding encoding)
- throws IOException {
- int len = name.length();
- ByteBuffer b = encoding.encode(name);
- while (b.limit() > length && len > 0) {
- b = encoding.encode(name.substring(0, --len));
- }
- final int limit = b.limit() - b.position();
- System.arraycopy(b.array(), b.arrayOffset(), buf, offset, limit);
-
- // Pad any remaining output bytes with NUL
- for (int i = limit; i < length; ++i) {
- buf[offset + i] = 0;
- }
-
- return offset + length;
- }
-
- /**
- * Fill buffer with unsigned octal number, padded with leading zeroes.
- *
- * @param value number to convert to octal - treated as unsigned
- * @param buffer destination buffer
- * @param offset starting offset in buffer
- * @param length length of buffer to fill
- * @throws IllegalArgumentException if the value will not fit in the buffer
- */
- public static void formatUnsignedOctalString(final long value, final byte[] buffer,
- final int offset, final int length) {
- int remaining = length;
- remaining--;
- if (value == 0) {
- buffer[offset + remaining--] = (byte) '0';
- } else {
- long val = value;
- for (; remaining >= 0 && val != 0; --remaining) {
- // CheckStyle:MagicNumber OFF
- buffer[offset + remaining] = (byte) ((byte) '0' + (byte) (val & 7));
- val = val >>> 3;
- // CheckStyle:MagicNumber ON
- }
- if (val != 0){
- throw new IllegalArgumentException
- (value+"="+Long.toOctalString(value)+ " will not fit in octal number buffer of length "+length);
- }
- }
-
- for (; remaining >= 0; --remaining) { // leading zeros
- buffer[offset + remaining] = (byte) '0';
- }
- }
-
- /**
- * Write an octal integer into a buffer.
- *
- * Uses {@link #formatUnsignedOctalString} to format
- * the value as an octal string with leading zeros.
- * The converted number is followed by space and NUL
- *
- * @param value The value to write
- * @param buf The buffer to receive the output
- * @param offset The starting offset into the buffer
- * @param length The size of the output buffer
- * @return The updated offset, i.e. offset+length
- * @throws IllegalArgumentException if the value (and trailer) will not fit in the buffer
- */
- public static int formatOctalBytes(final long value, final byte[] buf, final int offset, final int length) {
-
- int idx=length-2; // For space and trailing null
- formatUnsignedOctalString(value, buf, offset, idx);
-
- buf[offset + idx++] = (byte) ' '; // Trailing space
- buf[offset + idx] = 0; // Trailing null
-
- return offset + length;
- }
-
- /**
- * Write an octal long integer into a buffer.
- *
- * Uses {@link #formatUnsignedOctalString} to format
- * the value as an octal string with leading zeros.
- * The converted number is followed by a space.
- *
- * @param value The value to write as octal
- * @param buf The destination buffer.
- * @param offset The starting offset into the buffer.
- * @param length The length of the buffer
- * @return The updated offset
- * @throws IllegalArgumentException if the value (and trailer) will not fit in the buffer
- */
- public static int formatLongOctalBytes(final long value, final byte[] buf, final int offset, final int length) {
-
- final int idx=length-1; // For space
-
- formatUnsignedOctalString(value, buf, offset, idx);
- buf[offset + idx] = (byte) ' '; // Trailing space
-
- return offset + length;
- }
-
- /**
- * Write a long integer into a buffer as an octal string if this
- * will fit, or as a binary number otherwise.
- *
- * Uses {@link #formatUnsignedOctalString} to format
- * the value as an octal string with leading zeros.
- * The converted number is followed by a space.
- *
- * @param value The value to write into the buffer.
- * @param buf The destination buffer.
- * @param offset The starting offset into the buffer.
- * @param length The length of the buffer.
- * @return The updated offset.
- * @throws IllegalArgumentException if the value (and trailer)
- * will not fit in the buffer.
- * @since 1.4
- */
- public static int formatLongOctalOrBinaryBytes(
- final long value, final byte[] buf, final int offset, final int length) {
-
- // Check whether we are dealing with UID/GID or SIZE field
- final long maxAsOctalChar = length == TarConstants.UIDLEN ? TarConstants.MAXID : TarConstants.MAXSIZE;
-
- final boolean negative = value < 0;
- if (!negative && value <= maxAsOctalChar) { // OK to store as octal chars
- return formatLongOctalBytes(value, buf, offset, length);
- }
-
- if (length < 9) {
- formatLongBinary(value, buf, offset, length, negative);
- } else {
- formatBigIntegerBinary(value, buf, offset, length, negative);
- }
-
- buf[offset] = (byte) (negative ? 0xff : 0x80);
- return offset + length;
- }
-
- private static void formatLongBinary(final long value, final byte[] buf,
- final int offset, final int length,
- final boolean negative) {
- final int bits = (length - 1) * 8;
- final long max = 1L << bits;
- long val = Math.abs(value); // Long.MIN_VALUE stays Long.MIN_VALUE
- if (val < 0 || val >= max) {
- throw new IllegalArgumentException("Value " + value +
- " is too large for " + length + " byte field.");
- }
- if (negative) {
- val ^= max - 1;
- val++;
- val |= 0xffL << bits;
- }
- for (int i = offset + length - 1; i >= offset; i--) {
- buf[i] = (byte) val;
- val >>= 8;
- }
- }
-
- private static void formatBigIntegerBinary(final long value, final byte[] buf,
- final int offset,
- final int length,
- final boolean negative) {
- final BigInteger val = BigInteger.valueOf(value);
- final byte[] b = val.toByteArray();
- final int len = b.length;
- if (len > length - 1) {
- throw new IllegalArgumentException("Value " + value +
- " is too large for " + length + " byte field.");
- }
- final int off = offset + length - len;
- System.arraycopy(b, 0, buf, off, len);
- final byte fill = (byte) (negative ? 0xff : 0);
- for (int i = offset + 1; i < off; i++) {
- buf[i] = fill;
- }
- }
-
- /**
- * Writes an octal value into a buffer.
- *
- * Uses {@link #formatUnsignedOctalString} to format
- * the value as an octal string with leading zeros.
- * The converted number is followed by NUL and then space.
- *
- * @param value The value to convert
- * @param buf The destination buffer
- * @param offset The starting offset into the buffer.
- * @param length The size of the buffer.
- * @return The updated value of offset, i.e. offset+length
- * @throws IllegalArgumentException if the value (and trailer) will not fit in the buffer
- */
- public static int formatCheckSumOctalBytes(final long value, final byte[] buf, final int offset, final int length) {
-
- int idx=length-2; // for NUL and space
- formatUnsignedOctalString(value, buf, offset, idx);
-
- buf[offset + idx++] = 0; // Trailing null
- buf[offset + idx] = (byte) ' '; // Trailing space
-
- return offset + length;
- }
-
- /**
- * Compute the checksum of a tar entry header.
- *
- * @param buf The tar entry's header buffer.
- * @return The computed checksum.
- */
- public static long computeCheckSum(final byte[] buf) {
- long sum = 0;
-
- for (final byte element : buf) {
- sum += BYTE_MASK & element;
- }
-
- return sum;
- }
-
- /**
- * Wikipedia says:
- *
- * The checksum is calculated by taking the sum of the unsigned byte values
- * of the header block with the eight checksum bytes taken to be ascii
- * spaces (decimal value 32). It is stored as a six digit octal number with
- * leading zeroes followed by a NUL and then a space. Various
- * implementations do not adhere to this format. For better compatibility,
- * ignore leading and trailing whitespace, and get the first six digits. In
- * addition, some historic tar implementations treated bytes as signed.
- * Implementations typically calculate the checksum both ways, and treat it
- * as good if either the signed or unsigned sum matches the included
- * checksum.
- *
- * The return value of this method should be treated as a best-effort
- * heuristic rather than an absolute and final truth. The checksum
- * verification logic may well evolve over time as more special cases
- * are encountered.
- *
- * @param header tar header
- * @return whether the checksum is reasonably good
- * @see COMPRESS-191
- * @since 1.5
- */
- public static boolean verifyCheckSum(final byte[] header) {
- final long storedSum = parseOctal(header, CHKSUM_OFFSET, CHKSUMLEN);
- long unsignedSum = 0;
- long signedSum = 0;
-
- for (int i = 0; i < header.length; i++) {
- byte b = header[i];
- if (CHKSUM_OFFSET <= i && i < CHKSUM_OFFSET + CHKSUMLEN) {
- b = ' ';
- }
- unsignedSum += 0xff & b;
- signedSum += b;
- }
- return storedSum == unsignedSum || storedSum == signedSum;
- }
-
-}
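
The checksum convention documented in verifyCheckSum above is easy to get wrong, so here is the rule restated as standalone code: sum all 512 header bytes as unsigned values, with the 8 checksum bytes at offset 148 treated as ASCII spaces; lenient readers additionally accept the signed-byte sum. A sketch under those assumptions (class name is ours, not library API):

```java
import java.util.Arrays;

// Sketch: TarChecksumSketch is a hypothetical illustration, not part
// of Commons Compress.
public final class TarChecksumSketch {
    static final int CHKSUM_OFFSET = 148;
    static final int CHKSUMLEN = 8;

    /** Sum of unsigned header bytes with the checksum field read as spaces. */
    static long unsignedSum(byte[] header) {
        long sum = 0;
        for (int i = 0; i < header.length; i++) {
            byte b = (i >= CHKSUM_OFFSET && i < CHKSUM_OFFSET + CHKSUMLEN) ? (byte) ' ' : header[i];
            sum += b & 0xff;
        }
        return sum;
    }

    public static void main(String[] args) {
        byte[] header = new byte[512]; // an all-zero header
        Arrays.fill(header, CHKSUM_OFFSET, CHKSUM_OFFSET + CHKSUMLEN, (byte) ' ');
        System.out.println(unsignedSum(header)); // 8 * 32 = 256
    }
}
```
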
diff --git a/src/org/apache/commons/compress/archivers/tar/package.html b/src/org/apache/commons/compress/archivers/tar/package.html
deleted file mode 100644
index 141f33b610e..00000000000
--- a/src/org/apache/commons/compress/archivers/tar/package.html
+++ /dev/null
@@ -1,30 +0,0 @@
-<html>
-  <body>
-
-    <p>Provides stream classes for reading and writing archives using
-      the TAR format.</p>
-
-    <p>There are many different format dialects that call themselves
-      TAR. The classes of this package can read and write archives in
-      the traditional pre-POSIX ustar format and support GNU
-      specific extensions for long filenames that GNU tar itself by
-      now refers to as oldgnu.</p>
-
-  </body>
-</html>
diff --git a/src/org/apache/commons/compress/archivers/zip/AbstractUnicodeExtraField.java b/src/org/apache/commons/compress/archivers/zip/AbstractUnicodeExtraField.java
deleted file mode 100644
index 846c9e1e94b..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/AbstractUnicodeExtraField.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.UnsupportedEncodingException;
-import java.util.zip.CRC32;
-import java.util.zip.ZipException;
-
-import org.apache.commons.compress.utils.CharsetNames;
-
-/**
- * A common base class for Unicode extra information extra fields.
- * @NotThreadSafe
- */
-public abstract class AbstractUnicodeExtraField implements ZipExtraField {
- private long nameCRC32;
- private byte[] unicodeName;
- private byte[] data;
-
- protected AbstractUnicodeExtraField() {
- }
-
- /**
- * Assemble a unicode extension from the name/comment and
- * encoding of the original zip entry.
- *
- * @param text The file name or comment.
- * @param bytes The encoded form of the filename or comment in the zip
- * file.
- * @param off The offset of the encoded filename or comment in
- * bytes.
- * @param len The length of the encoded filename or comment in
- * bytes.
- */
- protected AbstractUnicodeExtraField(final String text, final byte[] bytes, final int off, final int len) {
- final CRC32 crc32 = new CRC32();
- crc32.update(bytes, off, len);
- nameCRC32 = crc32.getValue();
-
- try {
- unicodeName = text.getBytes(CharsetNames.UTF_8);
- } catch (final UnsupportedEncodingException e) {
- throw new RuntimeException("FATAL: UTF-8 encoding not supported.", e); //NOSONAR
- }
- }
-
- /**
- * Assemble a unicode extension from the name/comment and
- * encoding of the original zip entry.
- *
- * @param text The file name or comment.
- * @param bytes The encoded form of the filename or comment in the zip
- * file.
- */
- protected AbstractUnicodeExtraField(final String text, final byte[] bytes) {
- this(text, bytes, 0, bytes.length);
- }
-
- private void assembleData() {
- if (unicodeName == null) {
- return;
- }
-
- data = new byte[5 + unicodeName.length];
- // version 1
- data[0] = 0x01;
- System.arraycopy(ZipLong.getBytes(nameCRC32), 0, data, 1, 4);
- System.arraycopy(unicodeName, 0, data, 5, unicodeName.length);
- }
-
- /**
- * @return The CRC32 checksum of the filename or comment as
- * encoded in the central directory of the zip file.
- */
- public long getNameCRC32() {
- return nameCRC32;
- }
-
- /**
- * @param nameCRC32 The CRC32 checksum of the filename as encoded
- * in the central directory of the zip file to set.
- */
- public void setNameCRC32(final long nameCRC32) {
- this.nameCRC32 = nameCRC32;
- data = null;
- }
-
- /**
- * @return The UTF-8 encoded name.
- */
- public byte[] getUnicodeName() {
- byte[] b = null;
- if (unicodeName != null) {
- b = new byte[unicodeName.length];
- System.arraycopy(unicodeName, 0, b, 0, b.length);
- }
- return b;
- }
-
- /**
- * @param unicodeName The UTF-8 encoded name to set.
- */
- public void setUnicodeName(final byte[] unicodeName) {
- if (unicodeName != null) {
- this.unicodeName = new byte[unicodeName.length];
- System.arraycopy(unicodeName, 0, this.unicodeName, 0,
- unicodeName.length);
- } else {
- this.unicodeName = null;
- }
- data = null;
- }
-
- @Override
- public byte[] getCentralDirectoryData() {
- if (data == null) {
- this.assembleData();
- }
- byte[] b = null;
- if (data != null) {
- b = new byte[data.length];
- System.arraycopy(data, 0, b, 0, b.length);
- }
- return b;
- }
-
- @Override
- public ZipShort getCentralDirectoryLength() {
- if (data == null) {
- assembleData();
- }
- return new ZipShort(data != null ? data.length : 0);
- }
-
- @Override
- public byte[] getLocalFileDataData() {
- return getCentralDirectoryData();
- }
-
- @Override
- public ZipShort getLocalFileDataLength() {
- return getCentralDirectoryLength();
- }
-
- @Override
- public void parseFromLocalFileData(final byte[] buffer, final int offset, final int length)
- throws ZipException {
-
- if (length < 5) {
- throw new ZipException("UniCode path extra data must have at least 5 bytes.");
- }
-
- final int version = buffer[offset];
-
- if (version != 0x01) {
- throw new ZipException("Unsupported version [" + version
- + "] for UniCode path extra data.");
- }
-
- nameCRC32 = ZipLong.getValue(buffer, offset + 1);
- unicodeName = new byte[length - 5];
- System.arraycopy(buffer, offset + 5, unicodeName, 0, length - 5);
- data = null;
- }
-
- /**
- * Doesn't do anything special since this class always uses the
- * same data in central directory and local file data.
- */
- @Override
- public void parseFromCentralDirectoryData(final byte[] buffer, final int offset,
- final int length)
- throws ZipException {
- parseFromLocalFileData(buffer, offset, length);
- }
-}
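
The byte layout assembled by assembleData() above is compact enough to restate in full: one version byte (0x01), the CRC-32 of the name as stored in the regular header (four bytes, little-endian, as ZipLong writes it), then the UTF-8 bytes of the name. A self-contained sketch of the same layout, under those assumptions and with a hypothetical class name:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

// Sketch only: builds the payload of a Unicode path/comment extra field.
public final class UnicodeExtraSketch {
    static byte[] build(String name, byte[] headerEncodedName) {
        CRC32 crc = new CRC32();
        crc.update(headerEncodedName, 0, headerEncodedName.length);
        byte[] utf8 = name.getBytes(StandardCharsets.UTF_8);
        ByteBuffer buf = ByteBuffer.allocate(5 + utf8.length).order(ByteOrder.LITTLE_ENDIAN);
        buf.put((byte) 0x01);            // version byte
        buf.putInt((int) crc.getValue()); // CRC-32 of the header-encoded name
        buf.put(utf8);                    // UTF-8 name
        return buf.array();
    }
}
```

The CRC is taken over the *originally encoded* name rather than the UTF-8 form, so a reader can detect whether the regular header name was rewritten after the extra field was created.
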
diff --git a/src/org/apache/commons/compress/archivers/zip/AsiExtraField.java b/src/org/apache/commons/compress/archivers/zip/AsiExtraField.java
deleted file mode 100644
index e4afa95cdb4..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/AsiExtraField.java
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.util.zip.CRC32;
-import java.util.zip.ZipException;
-
-/**
- * Adds Unix file permission and UID/GID fields as well as symbolic
- * link handling.
- *
- * <p>This class uses the ASi extra field in the format:</p>
- * <pre>
- *         Value         Size            Description
- *         -----         ----            -----------
- * (Unix3) 0x756e        Short           tag for this extra block type
- *         TSize         Short           total data size for this block
- *         CRC           Long            CRC-32 of the remaining data
- *         Mode          Short           file permissions
- *         SizDev        Long            symlink'd size OR major/minor dev num
- *         UID           Short           user ID
- *         GID           Short           group ID
- *         (var.)        variable        symbolic link filename
- * </pre>
- *
- * <p>Short is two bytes and Long is four bytes in big endian byte and
- * word order, device numbers are currently not supported.</p>
- * @NotThreadSafe
- *
- * <p>Since the documentation this class is based upon doesn't mention
- * the character encoding of the file name at all, it is assumed that
- * it uses the current platform's default encoding.</p>
- */
-public class AsiExtraField implements ZipExtraField, UnixStat, Cloneable {
-
- private static final ZipShort HEADER_ID = new ZipShort(0x756E);
- private static final int WORD = 4;
- /**
- * Standard Unix stat(2) file mode.
- */
- private int mode = 0;
- /**
- * User ID.
- */
- private int uid = 0;
- /**
- * Group ID.
- */
- private int gid = 0;
- /**
- * File this entry points to, if it is a symbolic link.
- *
- * <p>empty string - if entry is not a symbolic link.</p>
- */
- private String link = "";
- /**
- * Is this an entry for a directory?
- */
- private boolean dirFlag = false;
-
- /**
- * Instance used to calculate checksums.
- */
- private CRC32 crc = new CRC32();
-
- /** Constructor for AsiExtraField. */
- public AsiExtraField() {
- }
-
- /**
- * The Header-ID.
- * @return the value for the header id for this extrafield
- */
- @Override
- public ZipShort getHeaderId() {
- return HEADER_ID;
- }
-
- /**
- * Length of the extra field in the local file data - without
- * Header-ID or length specifier.
- * @return a ZipShort for the length of the data of this extra field
- */
- @Override
- public ZipShort getLocalFileDataLength() {
- return new ZipShort(WORD // CRC
- + 2 // Mode
- + WORD // SizDev
- + 2 // UID
- + 2 // GID
- + getLinkedFile().getBytes().length);
- // Uses default charset - see class Javadoc
- }
-
- /**
- * Delegate to local file data.
- * @return the centralDirectory length
- */
- @Override
- public ZipShort getCentralDirectoryLength() {
- return getLocalFileDataLength();
- }
-
- /**
- * The actual data to put into local file data - without Header-ID
- * or length specifier.
- * @return get the data
- */
- @Override
- public byte[] getLocalFileDataData() {
- // CRC will be added later
- final byte[] data = new byte[getLocalFileDataLength().getValue() - WORD];
- System.arraycopy(ZipShort.getBytes(getMode()), 0, data, 0, 2);
-
- final byte[] linkArray = getLinkedFile().getBytes(); // Uses default charset - see class Javadoc
- // CheckStyle:MagicNumber OFF
- System.arraycopy(ZipLong.getBytes(linkArray.length),
- 0, data, 2, WORD);
-
- System.arraycopy(ZipShort.getBytes(getUserId()),
- 0, data, 6, 2);
- System.arraycopy(ZipShort.getBytes(getGroupId()),
- 0, data, 8, 2);
-
- System.arraycopy(linkArray, 0, data, 10, linkArray.length);
- // CheckStyle:MagicNumber ON
-
- crc.reset();
- crc.update(data);
- final long checksum = crc.getValue();
-
- final byte[] result = new byte[data.length + WORD];
- System.arraycopy(ZipLong.getBytes(checksum), 0, result, 0, WORD);
- System.arraycopy(data, 0, result, WORD, data.length);
- return result;
- }
-
- /**
- * Delegate to local file data.
- * @return the local file data
- */
- @Override
- public byte[] getCentralDirectoryData() {
- return getLocalFileDataData();
- }
-
- /**
- * Set the user id.
- * @param uid the user id
- */
- public void setUserId(final int uid) {
- this.uid = uid;
- }
-
- /**
- * Get the user id.
- * @return the user id
- */
- public int getUserId() {
- return uid;
- }
-
- /**
- * Set the group id.
- * @param gid the group id
- */
- public void setGroupId(final int gid) {
- this.gid = gid;
- }
-
- /**
- * Get the group id.
- * @return the group id
- */
- public int getGroupId() {
- return gid;
- }
-
- /**
- * Indicate that this entry is a symbolic link to the given filename.
- *
- * @param name Name of the file this entry links to, empty String
- * if it is not a symbolic link.
- */
- public void setLinkedFile(final String name) {
- link = name;
- mode = getMode(mode);
- }
-
- /**
- * Name of linked file
- *
- * @return name of the file this entry links to if it is a
- * symbolic link, the empty string otherwise.
- */
- public String getLinkedFile() {
- return link;
- }
-
- /**
- * Is this entry a symbolic link?
- * @return true if this is a symbolic link
- */
- public boolean isLink() {
- return getLinkedFile().length() != 0;
- }
-
- /**
- * File mode of this file.
- * @param mode the file mode
- */
- public void setMode(final int mode) {
- this.mode = getMode(mode);
- }
-
- /**
- * File mode of this file.
- * @return the file mode
- */
- public int getMode() {
- return mode;
- }
-
- /**
- * Indicate whether this entry is a directory.
- * @param dirFlag if true, this entry is a directory
- */
- public void setDirectory(final boolean dirFlag) {
- this.dirFlag = dirFlag;
- mode = getMode(mode);
- }
-
- /**
- * Is this entry a directory?
- * @return true if this entry is a directory
- */
- public boolean isDirectory() {
- return dirFlag && !isLink();
- }
-
- /**
- * Populate data from this array as if it was in local file data.
- * @param data an array of bytes
- * @param offset the start offset
- * @param length the number of bytes in the array from offset
- * @throws ZipException on error
- */
- @Override
- public void parseFromLocalFileData(final byte[] data, final int offset, final int length)
- throws ZipException {
-
- final long givenChecksum = ZipLong.getValue(data, offset);
- final byte[] tmp = new byte[length - WORD];
- System.arraycopy(data, offset + WORD, tmp, 0, length - WORD);
- crc.reset();
- crc.update(tmp);
- final long realChecksum = crc.getValue();
- if (givenChecksum != realChecksum) {
- throw new ZipException("bad CRC checksum "
- + Long.toHexString(givenChecksum)
- + " instead of "
- + Long.toHexString(realChecksum));
- }
-
- final int newMode = ZipShort.getValue(tmp, 0);
- // CheckStyle:MagicNumber OFF
- final byte[] linkArray = new byte[(int) ZipLong.getValue(tmp, 2)];
- uid = ZipShort.getValue(tmp, 6);
- gid = ZipShort.getValue(tmp, 8);
-
- if (linkArray.length == 0) {
- link = "";
- } else {
- System.arraycopy(tmp, 10, linkArray, 0, linkArray.length);
- link = new String(linkArray); // Uses default charset - see class Javadoc
- }
- // CheckStyle:MagicNumber ON
- setDirectory((newMode & DIR_FLAG) != 0);
- setMode(newMode);
- }
-
- /**
- * Doesn't do anything special since this class always uses the
- * same data in central directory and local file data.
- */
- @Override
- public void parseFromCentralDirectoryData(final byte[] buffer, final int offset,
- final int length)
- throws ZipException {
- parseFromLocalFileData(buffer, offset, length);
- }
-
- /**
- * Get the file mode for given permissions with the correct file type.
- * @param mode the mode
- * @return the type with the mode
- */
- protected int getMode(final int mode) {
- int type = FILE_FLAG;
- if (isLink()) {
- type = LINK_FLAG;
- } else if (isDirectory()) {
- type = DIR_FLAG;
- }
- return type | (mode & PERM_MASK);
- }
-
- @Override
- public Object clone() {
- try {
- final AsiExtraField cloned = (AsiExtraField) super.clone();
- cloned.crc = new CRC32();
- return cloned;
- } catch (final CloneNotSupportedException cnfe) {
- // impossible
- throw new RuntimeException(cnfe); //NOSONAR
- }
- }
-}
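
Review note: the AsiExtraField deleted above writes a little-endian record of mode, link-name length, uid, gid and link name, then prepends a CRC-32 computed over that record. The following standalone sketch mirrors that layout for reference; the class and helper names (AsiLayoutDemo, putShort, putInt) are invented for illustration and are not library API.

    import java.util.zip.CRC32;

    // Illustrative only: mirrors the record written by the deleted
    // AsiExtraField.getLocalFileDataData(); helpers below are hypothetical.
    public class AsiLayoutDemo {
        public static void main(String[] args) {
            int mode = 0xA1FF;                    // symlink type bits | 0777 permissions
            int uid = 1000, gid = 1000;
            byte[] link = "target".getBytes();    // default charset, as in the deleted class

            byte[] out = new byte[4 + 2 + 4 + 2 + 2 + link.length];
            putShort(out, 4, mode);               // offsets shifted by the 4-byte CRC slot
            putInt(out, 6, link.length);
            putShort(out, 10, uid);
            putShort(out, 12, gid);
            System.arraycopy(link, 0, out, 14, link.length);

            CRC32 crc = new CRC32();
            crc.update(out, 4, out.length - 4);   // checksum covers mode..link name
            putInt(out, 0, (int) crc.getValue());

            System.out.printf("payload=%d bytes, crc=%08x%n", out.length, crc.getValue());
        }

        private static void putShort(byte[] b, int off, int v) {
            b[off] = (byte) (v & 0xFF);           // little-endian, like ZipShort
            b[off + 1] = (byte) ((v >>> 8) & 0xFF);
        }

        private static void putInt(byte[] b, int off, int v) {
            for (int i = 0; i < 4; i++) {         // little-endian, like ZipLong
                b[off + i] = (byte) ((v >>> (8 * i)) & 0xFF);
            }
        }
    }
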
diff --git a/src/org/apache/commons/compress/archivers/zip/BinaryTree.java b/src/org/apache/commons/compress/archivers/zip/BinaryTree.java
deleted file mode 100644
index 9b3c3775625..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/BinaryTree.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-
-/**
- * Binary tree of positive values.
- *
- * @author Emmanuel Bourg
- * @since 1.7
- */
-class BinaryTree {
-
- /** Value in the array indicating an undefined node */
- private static final int UNDEFINED = -1;
-
- /** Value in the array indicating a non leaf node */
- private static final int NODE = -2;
-
- /**
- * The array representing the binary tree. The root is at index 0,
- * the left children are at 2*i+1 and the right children at 2*i+2.
- */
- private final int[] tree;
-
- public BinaryTree(final int depth) {
- tree = new int[(1 << (depth + 1)) - 1];
- Arrays.fill(tree, UNDEFINED);
- }
-
- /**
- * Adds a leaf to the tree.
- *
- * @param node the index of the node where the path is appended
- * @param path the path to the leaf (bits are parsed from the right to the left)
- * @param depth the number of nodes in the path
- * @param value the value of the leaf (must be positive)
- */
- public void addLeaf(final int node, final int path, final int depth, final int value) {
- if (depth == 0) {
- // end of the path reached, add the value to the current node
- if (tree[node] == UNDEFINED) {
- tree[node] = value;
- } else {
- throw new IllegalArgumentException("Tree value at index " + node + " has already been assigned (" + tree[node] + ")");
- }
- } else {
- // mark the current node as a non leaf node
- tree[node] = NODE;
-
- // move down the path recursively
- final int nextChild = 2 * node + 1 + (path & 1);
- addLeaf(nextChild, path >>> 1, depth - 1, value);
- }
- }
-
- /**
- * Reads a value from the specified bit stream.
- *
- * @param stream
- * @return the value decoded, or -1 if the end of the stream is reached
- */
- public int read(final BitStream stream) throws IOException {
- int currentIndex = 0;
-
- while (true) {
- final int bit = stream.nextBit();
- if (bit == -1) {
- return -1;
- }
-
- final int childIndex = 2 * currentIndex + 1 + bit;
- final int value = tree[childIndex];
- if (value == NODE) {
- // consume the next bit
- currentIndex = childIndex;
- } else if (value != UNDEFINED) {
- return value;
- } else {
- throw new IOException("The child " + bit + " of node at index " + currentIndex + " is not defined");
- }
- }
- }
-
-
- /**
- * Decodes the packed binary tree from the specified stream.
- */
- static BinaryTree decode(final InputStream in, final int totalNumberOfValues) throws IOException {
- // the first byte contains the size of the structure minus one
- final int size = in.read() + 1;
- if (size == 0) {
- throw new IOException("Cannot read the size of the encoded tree, unexpected end of stream");
- }
-
- final byte[] encodedTree = new byte[size];
- new DataInputStream(in).readFully(encodedTree);
-
- /** The maximum bit length for a value (16 or lower) */
- int maxLength = 0;
-
- final int[] originalBitLengths = new int[totalNumberOfValues];
- int pos = 0;
- for (final byte b : encodedTree) {
- // each byte encodes the number of values (upper 4 bits) for a bit length (lower 4 bits)
- final int numberOfValues = ((b & 0xF0) >> 4) + 1;
- final int bitLength = (b & 0x0F) + 1;
-
- for (int j = 0; j < numberOfValues; j++) {
- originalBitLengths[pos++] = bitLength;
- }
-
- maxLength = Math.max(maxLength, bitLength);
- }
-
- // sort the array of bit lengths and memorize the permutation used to restore the order of the codes
- final int[] permutation = new int[originalBitLengths.length];
- for (int k = 0; k < permutation.length; k++) {
- permutation[k] = k;
- }
-
- int c = 0;
- final int[] sortedBitLengths = new int[originalBitLengths.length];
- for (int k = 0; k < originalBitLengths.length; k++) {
- // iterate over the values
- for (int l = 0; l < originalBitLengths.length; l++) {
- // look for the value in the original array
- if (originalBitLengths[l] == k) {
- // put the value at the current position in the sorted array...
- sortedBitLengths[c] = k;
-
- // ...and memorize the permutation
- permutation[c] = l;
-
- c++;
- }
- }
- }
-
- // decode the values of the tree
- int code = 0;
- int codeIncrement = 0;
- int lastBitLength = 0;
-
- final int[] codes = new int[totalNumberOfValues];
-
- for (int i = totalNumberOfValues - 1; i >= 0; i--) {
- code = code + codeIncrement;
- if (sortedBitLengths[i] != lastBitLength) {
- lastBitLength = sortedBitLengths[i];
- codeIncrement = 1 << (16 - lastBitLength);
- }
- codes[permutation[i]] = code;
- }
-
- // build the tree
- final BinaryTree tree = new BinaryTree(maxLength);
-
- for (int k = 0; k < codes.length; k++) {
- final int bitLength = originalBitLengths[k];
- if (bitLength > 0) {
- tree.addLeaf(0, Integer.reverse(codes[k] << 16), bitLength, k);
- }
- }
-
- return tree;
- }
-}
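
Review note: the decoding tree above lives in a flat array (root at index 0, children of node i at 2*i+1 and 2*i+2) and code bits are consumed least-significant-first. A toy re-implementation of that walk, independent of the deleted class; all names here are illustrative.

    import java.util.Arrays;

    // Minimal standalone version of the implicit-array tree walk used by
    // the deleted BinaryTree class (illustrative, not the library API).
    public class ImplicitTreeDemo {
        static final int UNDEFINED = -1, NODE = -2;
        final int[] tree;

        ImplicitTreeDemo(int depth) {
            tree = new int[(1 << (depth + 1)) - 1];
            Arrays.fill(tree, UNDEFINED);
        }

        // path bits are consumed from the right, exactly like addLeaf()
        void addLeaf(int node, int path, int depth, int value) {
            if (depth == 0) {
                tree[node] = value;
            } else {
                tree[node] = NODE;
                addLeaf(2 * node + 1 + (path & 1), path >>> 1, depth - 1, value);
            }
        }

        int read(int bits, int len) {
            int node = 0;
            for (int i = 0; i < len; i++) {
                node = 2 * node + 1 + (bits & 1);   // descend left or right
                bits >>>= 1;
                if (tree[node] >= 0) {
                    return tree[node];              // reached a leaf value
                }
            }
            return UNDEFINED;
        }

        public static void main(String[] args) {
            ImplicitTreeDemo t = new ImplicitTreeDemo(2);
            t.addLeaf(0, 0b0, 1, 'A');   // 1-bit code: 0
            t.addLeaf(0, 0b01, 2, 'B');  // 2-bit code, consumed LSB first: 1 then 0
            t.addLeaf(0, 0b11, 2, 'C');  // 2-bit code: 1 then 1
            System.out.println((char) t.read(0b0, 1));  // A
            System.out.println((char) t.read(0b01, 2)); // B
            System.out.println((char) t.read(0b11, 2)); // C
        }
    }
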
diff --git a/src/org/apache/commons/compress/archivers/zip/BitStream.java b/src/org/apache/commons/compress/archivers/zip/BitStream.java
deleted file mode 100644
index fb737b7976d..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/BitStream.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.IOException;
-import java.io.InputStream;
-import org.apache.commons.compress.utils.BitInputStream;
-import java.nio.ByteOrder;
-
-/**
- * Iterates over the bits of an InputStream. For each byte the bits
- * are read from the right to the left.
- *
- * @since 1.7
- */
-class BitStream extends BitInputStream {
-
- BitStream(final InputStream in) {
- super(in, ByteOrder.LITTLE_ENDIAN);
- }
-
- /**
- * Returns the next bit.
- *
- * @return The next bit (0 or 1) or -1 if the end of the stream has been reached
- */
- int nextBit() throws IOException {
- return (int) readBits(1);
- }
-
- /**
- * Returns the integer value formed by the n next bits (up to 8 bits).
- *
- * @param n the number of bits read (up to 8)
- * @return The value formed by the n bits, or -1 if the end of the stream has been reached
- */
- long nextBits(final int n) throws IOException {
- return readBits(n);
- }
-
- int nextByte() throws IOException {
- return (int) readBits(8);
- }
-}
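
Review note: BitStream simply asks BitInputStream for LITTLE_ENDIAN order, so bits leave each byte starting at the least significant end. A one-byte standalone demonstration of that order (class name is invented):

    // Reads bits least-significant-first within each byte, matching the
    // LITTLE_ENDIAN order the deleted BitStream requested.
    public class LsbBitReaderDemo {
        public static void main(String[] args) {
            byte[] data = { (byte) 0b1011_0010 };
            StringBuilder order = new StringBuilder();
            for (int bitPos = 0; bitPos < 8; bitPos++) {
                int bit = (data[bitPos / 8] >> (bitPos % 8)) & 1; // LSB first
                order.append(bit);
            }
            System.out.println(order); // prints 01001101 (LSB of 0xB2 first)
        }
    }
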
diff --git a/src/org/apache/commons/compress/archivers/zip/CharsetAccessor.java b/src/org/apache/commons/compress/archivers/zip/CharsetAccessor.java
deleted file mode 100644
index e5342bec751..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/CharsetAccessor.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import java.nio.charset.Charset;
-
-/**
- * An interface added to allow access to the character set associated with an {@link NioZipEncoding},
- * without requiring a new method to be added to {@link ZipEncoding}.
- *
- * This avoids introducing a
- * potentially breaking change, or making {@link NioZipEncoding} a public class.
- *
- * @since 1.15
- */
-public interface CharsetAccessor {
-
- /**
- * Provides access to the character set associated with an object.
- *
- * This allows nio oriented code to use more natural character encoding/decoding methods,
- * whilst allowing existing code to continue to rely on special-case error handling for UTF-8.
- *
- * @return the character set associated with this object
- */
- Charset getCharset();
-}
diff --git a/src/org/apache/commons/compress/archivers/zip/CircularBuffer.java b/src/org/apache/commons/compress/archivers/zip/CircularBuffer.java
deleted file mode 100644
index 8502e46e5ad..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/CircularBuffer.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-/**
- * Circular byte buffer.
- *
- * @author Emmanuel Bourg
- * @since 1.7
- */
-class CircularBuffer {
-
- /** Size of the buffer */
- private final int size;
-
- /** The buffer */
- private final byte[] buffer;
-
- /** Index of the next data to be read from the buffer */
- private int readIndex;
-
- /** Index of the next data written in the buffer */
- private int writeIndex;
-
- CircularBuffer(final int size) {
- this.size = size;
- buffer = new byte[size];
- }
-
- /**
- * Tells if a new byte can be read from the buffer.
- */
- public boolean available() {
- return readIndex != writeIndex;
- }
-
- /**
- * Writes a byte to the buffer.
- */
- public void put(final int value) {
- buffer[writeIndex] = (byte) value;
- writeIndex = (writeIndex + 1) % size;
- }
-
- /**
- * Reads a byte from the buffer.
- */
- public int get() {
- if (available()) {
- final int value = buffer[readIndex];
- readIndex = (readIndex + 1) % size;
- return value & 0xFF;
- }
- return -1;
- }
-
- /**
- * Copy a previous interval in the buffer to the current position.
- *
- * @param distance the distance from the current write position
- * @param length the number of bytes to copy
- */
- public void copy(final int distance, final int length) {
- final int pos1 = writeIndex - distance;
- final int pos2 = pos1 + length;
- for (int i = pos1; i < pos2; i++) {
- buffer[writeIndex] = buffer[(i + size) % size];
- writeIndex = (writeIndex + 1) % size;
- }
- }
-}
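
Review note: copy() above is the LZ77-style back-reference step. Copying byte by byte through the advancing write index deliberately allows the source to overlap the output, e.g. distance 2 and length 6 repeat a two-byte pattern three times. A standalone sketch with illustrative names:

    // Demonstrates the overlapping back-reference copy performed by the
    // deleted CircularBuffer.copy(distance, length).
    public class BackReferenceDemo {
        public static void main(String[] args) {
            int size = 16;
            byte[] buf = new byte[size];
            int write = 0;

            for (byte b : "ab".getBytes()) {   // seed the dictionary
                buf[write] = b;
                write = (write + 1) % size;
            }

            int distance = 2, length = 6;      // copy(2, 6): repeats "ab" three times
            int from = write - distance;
            for (int i = from; i < from + length; i++) {
                buf[write] = buf[(i + size) % size];   // may read bytes just written
                write = (write + 1) % size;
            }

            System.out.println(new String(buf, 0, write)); // abababab
        }
    }
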
diff --git a/src/org/apache/commons/compress/archivers/zip/ExplodingInputStream.java b/src/org/apache/commons/compress/archivers/zip/ExplodingInputStream.java
deleted file mode 100644
index 70eb3083382..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ExplodingInputStream.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * The implode compression method was added to PKZIP 1.01 released in 1989.
- * It was then dropped from PKZIP 2.0 released in 1993 in favor of the deflate
- * method.
- *
- * The algorithm is described in the ZIP File Format Specification.
- *
- * @see ZIP File Format Specification
- *
- * @author Emmanuel Bourg
- * @since 1.7
- */
-class ExplodingInputStream extends InputStream implements InputStreamStatistics {
-
- /** The underlying stream containing the compressed data */
- private final InputStream in;
-
- /** The stream of bits read from the input stream */
- private BitStream bits;
-
- /** The size of the sliding dictionary (4K or 8K) */
- private final int dictionarySize;
-
- /** The number of Shannon-Fano trees (2 or 3) */
- private final int numberOfTrees;
-
- private final int minimumMatchLength;
-
- /** The binary tree containing the 256 encoded literals (null when only two trees are used) */
- private BinaryTree literalTree;
-
- /** The binary tree containing the 64 encoded lengths */
- private BinaryTree lengthTree;
-
- /** The binary tree containing the 64 encoded distances */
- private BinaryTree distanceTree;
-
- /** Output buffer holding the decompressed data */
- private final CircularBuffer buffer = new CircularBuffer(32 * 1024);
-
- private long uncompressedCount = 0;
-
- private long treeSizes = 0;
-
- /**
- * Create a new stream decompressing the content of the specified stream
- * using the explode algorithm.
- *
- * @param dictionarySize the size of the sliding dictionary (4096 or 8192)
- * @param numberOfTrees the number of trees (2 or 3)
- * @param in the compressed data stream
- */
- public ExplodingInputStream(final int dictionarySize, final int numberOfTrees, final InputStream in) {
- if (dictionarySize != 4096 && dictionarySize != 8192) {
- throw new IllegalArgumentException("The dictionary size must be 4096 or 8192");
- }
- if (numberOfTrees != 2 && numberOfTrees != 3) {
- throw new IllegalArgumentException("The number of trees must be 2 or 3");
- }
- this.dictionarySize = dictionarySize;
- this.numberOfTrees = numberOfTrees;
- this.minimumMatchLength = numberOfTrees;
- this.in = in;
- }
-
- /**
- * Reads the encoded binary trees and prepares the bit stream.
- *
- * @throws IOException
- */
- private void init() throws IOException {
- if (bits == null) {
- try (CountingInputStream i = new CountingInputStream(in) {
- @Override
- public void close() {
- // we do not want to close in
- }
- }) {
- if (numberOfTrees == 3) {
- literalTree = BinaryTree.decode(i, 256);
- }
-
- lengthTree = BinaryTree.decode(i, 64);
- distanceTree = BinaryTree.decode(i, 64);
- treeSizes += i.getBytesRead();
- }
-
- bits = new BitStream(in);
- }
- }
-
- @Override
- public int read() throws IOException {
- if (!buffer.available()) {
- fillBuffer();
- }
-
- final int ret = buffer.get();
- if (ret > -1) {
- uncompressedCount++;
- }
- return ret;
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return bits.getBytesRead() + treeSizes;
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getUncompressedCount() {
- return uncompressedCount;
- }
-
- /**
- * @since 1.17
- */
- @Override
- public void close() throws IOException {
- in.close();
- }
-
- /**
- * Fill the sliding dictionary with more data.
- * @throws IOException
- */
- private void fillBuffer() throws IOException {
- init();
-
- final int bit = bits.nextBit();
- if (bit == 1) {
- // literal value
- int literal;
- if (literalTree != null) {
- literal = literalTree.read(bits);
- } else {
- literal = bits.nextByte();
- }
-
- if (literal == -1) {
- // end of stream reached, nothing left to decode
- return;
- }
-
- buffer.put(literal);
-
- } else if (bit == 0) {
- // back reference
- final int distanceLowSize = dictionarySize == 4096 ? 6 : 7;
- final int distanceLow = (int) bits.nextBits(distanceLowSize);
- final int distanceHigh = distanceTree.read(bits);
- if (distanceHigh == -1 && distanceLow <= 0) {
- // end of stream reached, nothing left to decode
- return;
- }
- final int distance = distanceHigh << distanceLowSize | distanceLow;
-
- int length = lengthTree.read(bits);
- if (length == 63) {
- length += bits.nextBits(8);
- }
- length += minimumMatchLength;
-
- buffer.copy(distance + 1, length);
- }
- }
-
-}
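
Review note: in fillBuffer() above a back reference combines directly-read low bits with a tree-decoded high part, and a length code of 63 is extended by the next eight bits before the minimum match length is added. The same arithmetic with made-up values:

    // Made-up values illustrating the bit arithmetic in the deleted
    // ExplodingInputStream.fillBuffer().
    public class ImplodeDistanceDemo {
        public static void main(String[] args) {
            int dictionarySize = 8192;                         // 4096 -> 6 low bits, 8192 -> 7
            int distanceLowSize = dictionarySize == 4096 ? 6 : 7;
            int distanceLow = 86;                              // read directly from the stream
            int distanceHigh = 13;                             // decoded via the distance tree
            int distance = distanceHigh << distanceLowSize | distanceLow;
            System.out.println(distance);                      // 13 * 128 + 86 = 1750

            int length = 63;                                   // tree-coded length, 63 = escape
            length += 200;                                     // ...extended by the next 8 bits
            length += 3;                                       // + minimumMatchLength (3 trees)
            System.out.println(length);                        // 266
        }
    }
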
diff --git a/src/org/apache/commons/compress/archivers/zip/ExtraFieldUtils.java b/src/org/apache/commons/compress/archivers/zip/ExtraFieldUtils.java
deleted file mode 100644
index eed6cb9e3a4..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ExtraFieldUtils.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.zip.ZipException;
-
-/**
- * ZipExtraField related methods
- * @NotThreadSafe because the HashMap is not synch.
- */
-// CheckStyle:HideUtilityClassConstructorCheck OFF (bc)
-public class ExtraFieldUtils {
-
- private static final int WORD = 4;
-
- /**
- * Static registry of known extra fields.
- */
- private static final Map<ZipShort, Class<?>> implementations;
-
- static {
- implementations = new ConcurrentHashMap<>();
- register(AsiExtraField.class);
- register(X5455_ExtendedTimestamp.class);
- register(X7875_NewUnix.class);
- register(JarMarker.class);
- register(UnicodePathExtraField.class);
- register(UnicodeCommentExtraField.class);
- register(Zip64ExtendedInformationExtraField.class);
- register(X000A_NTFS.class);
- register(X0014_X509Certificates.class);
- register(X0015_CertificateIdForFile.class);
- register(X0016_CertificateIdForCentralDirectory.class);
- register(X0017_StrongEncryptionHeader.class);
- register(X0019_EncryptionRecipientCertificateList.class);
- register(ResourceAlignmentExtraField.class);
- }
-
- /**
- * Register a ZipExtraField implementation.
- *
- * <p>The given class must have a no-arg constructor and implement
- * the {@link ZipExtraField ZipExtraField interface}.
- * @param c the class to register
- */
- public static void register(final Class<?> c) {
- try {
- final ZipExtraField ze = (ZipExtraField) c.newInstance();
- implementations.put(ze.getHeaderId(), c);
- } catch (final ClassCastException cc) {
- throw new RuntimeException(c + " doesn\'t implement ZipExtraField"); //NOSONAR
- } catch (final InstantiationException ie) {
- throw new RuntimeException(c + " is not a concrete class"); //NOSONAR
- } catch (final IllegalAccessException ie) {
- throw new RuntimeException(c + "\'s no-arg constructor is not public"); //NOSONAR
- }
- }
-
- /**
- * Create an instance of the appropriate ExtraField, falls back to
- * {@link UnrecognizedExtraField UnrecognizedExtraField}.
- * @param headerId the header identifier
- * @return an instance of the appropriate ExtraField
- * @throws InstantiationException if unable to instantiate the class
- * @throws IllegalAccessException if not allowed to instantiate the class
- */
- public static ZipExtraField createExtraField(final ZipShort headerId)
- throws InstantiationException, IllegalAccessException {
- final Class<?> c = implementations.get(headerId);
- if (c != null) {
- return (ZipExtraField) c.newInstance();
- }
- final UnrecognizedExtraField u = new UnrecognizedExtraField();
- u.setHeaderId(headerId);
- return u;
- }
-
- /**
- * Split the array into ExtraFields and populate them with the
- * given data as local file data, throwing an exception if the
- * data cannot be parsed.
- * @param data an array of bytes as it appears in local file data
- * @return an array of ExtraFields
- * @throws ZipException on error
- */
- public static ZipExtraField[] parse(final byte[] data) throws ZipException {
- return parse(data, true, UnparseableExtraField.THROW);
- }
-
- /**
- * Split the array into ExtraFields and populate them with the
- * given data, throwing an exception if the data cannot be parsed.
- * @param data an array of bytes
- * @param local whether data originates from the local file data
- * or the central directory
- * @return an array of ExtraFields
- * @throws ZipException on error
- */
- public static ZipExtraField[] parse(final byte[] data, final boolean local)
- throws ZipException {
- return parse(data, local, UnparseableExtraField.THROW);
- }
-
- /**
- * Split the array into ExtraFields and populate them with the
- * given data.
- * @param data an array of bytes
- * @param local whether data originates from the local file data
- * or the central directory
- * @param onUnparseableData what to do if the extra field data
- * cannot be parsed.
- * @return an array of ExtraFields
- * @throws ZipException on error
- *
- * @since 1.1
- */
- public static ZipExtraField[] parse(final byte[] data, final boolean local,
- final UnparseableExtraField onUnparseableData)
- throws ZipException {
- final List<ZipExtraField> v = new ArrayList<>();
- int start = 0;
- LOOP:
- while (start <= data.length - WORD) {
- final ZipShort headerId = new ZipShort(data, start);
- final int length = new ZipShort(data, start + 2).getValue();
- if (start + WORD + length > data.length) {
- switch(onUnparseableData.getKey()) {
- case UnparseableExtraField.THROW_KEY:
- throw new ZipException("bad extra field starting at "
- + start + ". Block length of "
- + length + " bytes exceeds remaining"
- + " data of "
- + (data.length - start - WORD)
- + " bytes.");
- case UnparseableExtraField.READ_KEY:
- final UnparseableExtraFieldData field =
- new UnparseableExtraFieldData();
- if (local) {
- field.parseFromLocalFileData(data, start,
- data.length - start);
- } else {
- field.parseFromCentralDirectoryData(data, start,
- data.length - start);
- }
- v.add(field);
- //$FALL-THROUGH$
- case UnparseableExtraField.SKIP_KEY:
- // since we cannot parse the data we must assume
- // the extra field consumes the whole rest of the
- // available data
- break LOOP;
- default:
- throw new ZipException("unknown UnparseableExtraField key: "
- + onUnparseableData.getKey());
- }
- }
- try {
- final ZipExtraField ze = createExtraField(headerId);
- try {
- if (local) {
- ze.parseFromLocalFileData(data, start + WORD, length);
- } else {
- ze.parseFromCentralDirectoryData(data, start + WORD, length);
- }
- } catch (ArrayIndexOutOfBoundsException aiobe) {
- throw (ZipException) new ZipException("Failed to parse corrupt ZIP extra field of type "
- + Integer.toHexString(headerId.getValue())).initCause(aiobe);
- }
- v.add(ze);
- } catch (final InstantiationException | IllegalAccessException ie) {
- throw (ZipException) new ZipException(ie.getMessage()).initCause(ie);
- }
- start += length + WORD;
- }
-
- final ZipExtraField[] result = new ZipExtraField[v.size()];
- return v.toArray(result);
- }
-
- /**
- * Merges the local file data fields of the given ZipExtraFields.
- * @param data an array of ExtraFiles
- * @return an array of bytes
- */
- public static byte[] mergeLocalFileDataData(final ZipExtraField[] data) {
- final boolean lastIsUnparseableHolder = data.length > 0
- && data[data.length - 1] instanceof UnparseableExtraFieldData;
- final int regularExtraFieldCount =
- lastIsUnparseableHolder ? data.length - 1 : data.length;
-
- int sum = WORD * regularExtraFieldCount;
- for (final ZipExtraField element : data) {
- sum += element.getLocalFileDataLength().getValue();
- }
-
- final byte[] result = new byte[sum];
- int start = 0;
- for (int i = 0; i < regularExtraFieldCount; i++) {
- System.arraycopy(data[i].getHeaderId().getBytes(),
- 0, result, start, 2);
- System.arraycopy(data[i].getLocalFileDataLength().getBytes(),
- 0, result, start + 2, 2);
- start += WORD;
- final byte[] local = data[i].getLocalFileDataData();
- if (local != null) {
- System.arraycopy(local, 0, result, start, local.length);
- start += local.length;
- }
- }
- if (lastIsUnparseableHolder) {
- final byte[] local = data[data.length - 1].getLocalFileDataData();
- if (local != null) {
- System.arraycopy(local, 0, result, start, local.length);
- }
- }
- return result;
- }
-
- /**
- * Merges the central directory fields of the given ZipExtraFields.
- * @param data an array of ExtraFields
- * @return an array of bytes
- */
- public static byte[] mergeCentralDirectoryData(final ZipExtraField[] data) {
- final boolean lastIsUnparseableHolder = data.length > 0
- && data[data.length - 1] instanceof UnparseableExtraFieldData;
- final int regularExtraFieldCount =
- lastIsUnparseableHolder ? data.length - 1 : data.length;
-
- int sum = WORD * regularExtraFieldCount;
- for (final ZipExtraField element : data) {
- sum += element.getCentralDirectoryLength().getValue();
- }
- final byte[] result = new byte[sum];
- int start = 0;
- for (int i = 0; i < regularExtraFieldCount; i++) {
- System.arraycopy(data[i].getHeaderId().getBytes(),
- 0, result, start, 2);
- System.arraycopy(data[i].getCentralDirectoryLength().getBytes(),
- 0, result, start + 2, 2);
- start += WORD;
- final byte[] local = data[i].getCentralDirectoryData();
- if (local != null) {
- System.arraycopy(local, 0, result, start, local.length);
- start += local.length;
- }
- }
- if (lastIsUnparseableHolder) {
- final byte[] local = data[data.length - 1].getCentralDirectoryData();
- if (local != null) {
- System.arraycopy(local, 0, result, start, local.length);
- }
- }
- return result;
- }
-
- /**
- * "enum" for the possible actions to take if the extra field
- * cannot be parsed.
- *
- * @since 1.1
- */
- public static final class UnparseableExtraField {
- /**
- * Key for "throw an exception" action.
- */
- public static final int THROW_KEY = 0;
- /**
- * Key for "skip" action.
- */
- public static final int SKIP_KEY = 1;
- /**
- * Key for "read" action.
- */
- public static final int READ_KEY = 2;
-
- /**
- * Throw an exception if field cannot be parsed.
- */
- public static final UnparseableExtraField THROW
- = new UnparseableExtraField(THROW_KEY);
-
- /**
- * Skip the extra field entirely and don't make its data
- * available - effectively removing the extra field data.
- */
- public static final UnparseableExtraField SKIP
- = new UnparseableExtraField(SKIP_KEY);
-
- /**
- * Read the extra field data into an instance of {@link
- * UnparseableExtraFieldData UnparseableExtraFieldData}.
- */
- public static final UnparseableExtraField READ
- = new UnparseableExtraField(READ_KEY);
-
- private final int key;
-
- private UnparseableExtraField(final int k) {
- key = k;
- }
-
- /**
- * Key of the action to take.
- * @return the key
- */
- public int getKey() { return key; }
- }
-}
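
Review note: parse() above treats the extra-field block as TLV records: a 2-byte little-endian header id, a 2-byte length, then the payload; a declared length that overruns the block triggers the THROW/READ/SKIP policy. The walk in standalone form (demo class and sample bytes are invented, though 0xCAFE and 0x5455 are real header ids):

    // Standalone sketch of the id/length walk performed by the deleted
    // ExtraFieldUtils.parse(): each record is id(2) + length(2) + payload.
    public class ExtraFieldTlvDemo {
        public static void main(String[] args) {
            byte[] extra = {
                (byte) 0xFE, (byte) 0xCA, 0x00, 0x00,        // 0xCAFE (JarMarker), empty
                0x55, 0x54, 0x05, 0x00, 0x03, 1, 2, 3, 4     // 0x5455 (ext. timestamp), 5 bytes
            };
            int start = 0;
            while (start <= extra.length - 4) {
                int id = (extra[start] & 0xFF) | ((extra[start + 1] & 0xFF) << 8);
                int len = (extra[start + 2] & 0xFF) | ((extra[start + 3] & 0xFF) << 8);
                if (start + 4 + len > extra.length) {
                    break;                                   // unparseable tail: THROW/READ/SKIP
                }
                System.out.printf("field 0x%04X, %d bytes%n", id, len);
                start += 4 + len;
            }
        }
    }
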
diff --git a/src/org/apache/commons/compress/archivers/zip/GeneralPurposeBit.java b/src/org/apache/commons/compress/archivers/zip/GeneralPurposeBit.java
deleted file mode 100644
index dd363aa9334..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/GeneralPurposeBit.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-/**
- * Parser/encoder for the "general purpose bit" field in ZIP's local
- * file and central directory headers.
- *
- * @since 1.1
- * @NotThreadSafe
- */
-public final class GeneralPurposeBit implements Cloneable {
-
- /**
- * Indicates that the file is encrypted.
- */
- private static final int ENCRYPTION_FLAG = 1 << 0;
-
- /**
- * Indicates the size of the sliding dictionary used by the compression method 6 (imploding).
- * <ul>
- *   <li>0: 4096 bytes</li>
- *   <li>1: 8192 bytes</li>
- * </ul>
- */
- private static final int SLIDING_DICTIONARY_SIZE_FLAG = 1 << 1;
-
- /**
- * Indicates the number of Shannon-Fano trees used by the compression method 6 (imploding).
- * <ul>
- *   <li>0: 2 trees (lengths, distances)</li>
- *   <li>1: 3 trees (literals, lengths, distances)</li>
- * </ul>
- */
- private static final int NUMBER_OF_SHANNON_FANO_TREES_FLAG = 1 << 2;
-
- /**
- * Indicates that a data descriptor stored after the file contents
- * will hold CRC and size information.
- */
- private static final int DATA_DESCRIPTOR_FLAG = 1 << 3;
-
- /**
- * Indicates strong encryption.
- */
- private static final int STRONG_ENCRYPTION_FLAG = 1 << 6;
-
- /**
- * Indicates that filenames are written in UTF-8.
- *
- * <p>The only reason this is public is that {@link
- * ZipArchiveOutputStream#EFS_FLAG} was public in Apache Commons
- * Compress 1.0 and we needed a substitute for it.
- */
- public static final int UFT8_NAMES_FLAG = 1 << 11;
-
- private boolean languageEncodingFlag = false;
- private boolean dataDescriptorFlag = false;
- private boolean encryptionFlag = false;
- private boolean strongEncryptionFlag = false;
- private int slidingDictionarySize;
- private int numberOfShannonFanoTrees;
-
- public GeneralPurposeBit() {
- }
-
- /**
- * whether the current entry uses UTF8 for file name and comment.
- * @return whether the current entry uses UTF8 for file name and comment.
- */
- public boolean usesUTF8ForNames() {
- return languageEncodingFlag;
- }
-
- /**
- * whether the current entry will use UTF8 for file name and comment.
- * @param b whether the current entry will use UTF8 for file name and comment.
- */
- public void useUTF8ForNames(final boolean b) {
- languageEncodingFlag = b;
- }
-
- /**
- * whether the current entry uses the data descriptor to store CRC
- * and size information.
- * @return whether the current entry uses the data descriptor to store CRC
- * and size information
- */
- public boolean usesDataDescriptor() {
- return dataDescriptorFlag;
- }
-
- /**
- * whether the current entry will use the data descriptor to store
- * CRC and size information.
- * @param b whether the current entry will use the data descriptor to store
- * CRC and size information
- */
- public void useDataDescriptor(final boolean b) {
- dataDescriptorFlag = b;
- }
-
- /**
- * whether the current entry is encrypted.
- * @return whether the current entry is encrypted
- */
- public boolean usesEncryption() {
- return encryptionFlag;
- }
-
- /**
- * whether the current entry will be encrypted.
- * @param b whether the current entry will be encrypted
- */
- public void useEncryption(final boolean b) {
- encryptionFlag = b;
- }
-
- /**
- * whether the current entry is encrypted using strong encryption.
- * @return whether the current entry is encrypted using strong encryption
- */
- public boolean usesStrongEncryption() {
- return encryptionFlag && strongEncryptionFlag;
- }
-
- /**
- * whether the current entry will be encrypted using strong encryption.
- * @param b whether the current entry will be encrypted using strong encryption
- */
- public void useStrongEncryption(final boolean b) {
- strongEncryptionFlag = b;
- if (b) {
- useEncryption(true);
- }
- }
-
- /**
- * Returns the sliding dictionary size used by the compression method 6 (imploding).
- */
- int getSlidingDictionarySize() {
- return slidingDictionarySize;
- }
-
- /**
- * Returns the number of trees used by the compression method 6 (imploding).
- */
- int getNumberOfShannonFanoTrees() {
- return numberOfShannonFanoTrees;
- }
-
- /**
- * Encodes the set bits in a form suitable for ZIP archives.
- * @return the encoded general purpose bits
- */
- public byte[] encode() {
- final byte[] result = new byte[2];
- encode(result, 0);
- return result;
- }
-
-
- /**
- * Encodes the set bits in a form suitable for ZIP archives.
- *
- * @param buf the output buffer
- * @param offset
- * The offset within the output buffer of the first byte to be written.
- * must be non-negative and no larger than buf.length-2
- */
- public void encode(final byte[] buf, final int offset) {
- ZipShort.putShort((dataDescriptorFlag ? DATA_DESCRIPTOR_FLAG : 0)
- |
- (languageEncodingFlag ? UFT8_NAMES_FLAG : 0)
- |
- (encryptionFlag ? ENCRYPTION_FLAG : 0)
- |
- (strongEncryptionFlag ? STRONG_ENCRYPTION_FLAG : 0)
- , buf, offset);
- }
-
- /**
- * Parses the supported flags from the given archive data.
- *
- * @param data local file header or a central directory entry.
- * @param offset offset at which the general purpose bit starts
- * @return parsed flags
- */
- public static GeneralPurposeBit parse(final byte[] data, final int offset) {
- final int generalPurposeFlag = ZipShort.getValue(data, offset);
- final GeneralPurposeBit b = new GeneralPurposeBit();
- b.useDataDescriptor((generalPurposeFlag & DATA_DESCRIPTOR_FLAG) != 0);
- b.useUTF8ForNames((generalPurposeFlag & UFT8_NAMES_FLAG) != 0);
- b.useStrongEncryption((generalPurposeFlag & STRONG_ENCRYPTION_FLAG) != 0);
- b.useEncryption((generalPurposeFlag & ENCRYPTION_FLAG) != 0);
- b.slidingDictionarySize = (generalPurposeFlag & SLIDING_DICTIONARY_SIZE_FLAG) != 0 ? 8192 : 4096;
- b.numberOfShannonFanoTrees = (generalPurposeFlag & NUMBER_OF_SHANNON_FANO_TREES_FLAG) != 0 ? 3 : 2;
- return b;
- }
-
- @Override
- public int hashCode() {
- return 3 * (7 * (13 * (17 * (encryptionFlag ? 1 : 0)
- + (strongEncryptionFlag ? 1 : 0))
- + (languageEncodingFlag ? 1 : 0))
- + (dataDescriptorFlag ? 1 : 0));
- }
-
- @Override
- public boolean equals(final Object o) {
- if (!(o instanceof GeneralPurposeBit)) {
- return false;
- }
- final GeneralPurposeBit g = (GeneralPurposeBit) o;
- return g.encryptionFlag == encryptionFlag
- && g.strongEncryptionFlag == strongEncryptionFlag
- && g.languageEncodingFlag == languageEncodingFlag
- && g.dataDescriptorFlag == dataDescriptorFlag;
- }
-
- @Override
- public Object clone() {
- try {
- return super.clone();
- } catch (final CloneNotSupportedException ex) {
- // impossible
- throw new RuntimeException("GeneralPurposeBit is not Cloneable?", ex); //NOSONAR
- }
- }
-}
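
Review note: encode() and parse() above round-trip a 16-bit little-endian flag word; bits 1 and 2 additionally carry the implode dictionary size and tree count consumed by ExplodingInputStream. A compact round trip without the class (names invented):

    // Round-trips the flag bits handled by the deleted GeneralPurposeBit
    // (bit 0 encryption, bit 3 data descriptor, bit 11 UTF-8 names).
    public class GeneralPurposeBitDemo {
        public static void main(String[] args) {
            int flags = (1 << 3) | (1 << 11);          // data descriptor + UTF-8 names
            byte[] buf = { (byte) (flags & 0xFF), (byte) ((flags >> 8) & 0xFF) };

            int parsed = (buf[0] & 0xFF) | ((buf[1] & 0xFF) << 8);
            System.out.println("encrypted:       " + ((parsed & (1 << 0)) != 0));  // false
            System.out.println("data descriptor: " + ((parsed & (1 << 3)) != 0));  // true
            System.out.println("utf-8 names:     " + ((parsed & (1 << 11)) != 0)); // true

            // imploding parameters are read the same way:
            int dictSize = (parsed & (1 << 1)) != 0 ? 8192 : 4096;
            int trees = (parsed & (1 << 2)) != 0 ? 3 : 2;
            System.out.println(dictSize + " / " + trees);                          // 4096 / 2
        }
    }
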
diff --git a/src/org/apache/commons/compress/archivers/zip/InflaterInputStream.java b/src/org/apache/commons/compress/archivers/zip/InflaterInputStream.java
deleted file mode 100644
index 32d20e3ef7f..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/InflaterInputStream.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.zip.Inflater;
-
-/**
- * Helper class to provide statistics
- *
- * @since 1.17
- */
-/* package */ class InflaterInputStream extends java.util.zip.InflaterInputStream
- implements InputStreamStatistics {
- private long compressedCount = 0;
- private long uncompressedCount = 0;
-
- public InflaterInputStream(InputStream in) {
- super(in);
- }
-
- public InflaterInputStream(InputStream in, Inflater inf) {
- super(in, inf);
- }
-
- public InflaterInputStream(InputStream in, Inflater inf, int size) {
- super(in, inf, size);
- }
-
- @Override
- protected void fill() throws IOException {
- super.fill();
- compressedCount += inf.getRemaining();
- }
-
- @Override
- public int read() throws IOException {
- final int b = super.read();
- if (b > -1) {
- uncompressedCount++;
- }
- return b;
- }
-
- @Override
- public int read(byte[] b, int off, int len) throws IOException {
- final int bytes = super.read(b, off, len);
- if (bytes > -1) {
- uncompressedCount += bytes;
- }
- return bytes;
- }
-
- @Override
- public long getCompressedCount() {
- return compressedCount;
- }
-
- @Override
- public long getUncompressedCount() {
- return uncompressedCount;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/zip/InflaterInputStreamWithStatistics.java b/src/org/apache/commons/compress/archivers/zip/InflaterInputStreamWithStatistics.java
deleted file mode 100644
index b10590fa6f4..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/InflaterInputStreamWithStatistics.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.zip.Inflater;
-import java.util.zip.InflaterInputStream;
-
-/**
- * Helper class to provide statistics
- *
- * @since 1.17
- */
-/* package */ class InflaterInputStreamWithStatistics extends InflaterInputStream
- implements InputStreamStatistics {
- private long compressedCount = 0;
- private long uncompressedCount = 0;
-
- public InflaterInputStreamWithStatistics(InputStream in) {
- super(in);
- }
-
- public InflaterInputStreamWithStatistics(InputStream in, Inflater inf) {
- super(in, inf);
- }
-
- public InflaterInputStreamWithStatistics(InputStream in, Inflater inf, int size) {
- super(in, inf, size);
- }
-
- @Override
- protected void fill() throws IOException {
- super.fill();
- compressedCount += inf.getRemaining();
- }
-
- @Override
- public int read() throws IOException {
- final int b = super.read();
- if (b > -1) {
- uncompressedCount++;
- }
- return b;
- }
-
- @Override
- public int read(byte[] b, int off, int len) throws IOException {
- final int bytes = super.read(b, off, len);
- if (bytes > -1) {
- uncompressedCount += bytes;
- }
- return bytes;
- }
-
- @Override
- public long getCompressedCount() {
- return compressedCount;
- }
-
- @Override
- public long getUncompressedCount() {
- return uncompressedCount;
- }
-}
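
Review note: this class and the InflaterInputStream wrapper above it are the same idea twice: sample inf.getRemaining() right after fill() for the compressed count, and accumulate read() results for the uncompressed count. A runnable standalone equivalent (demo-only class name):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.zip.DeflaterOutputStream;
    import java.util.zip.Inflater;

    // Standalone version of the statistics idea in the deleted wrappers:
    // count consumed input after each fill and produced output in read().
    public class CountingInflaterDemo extends java.util.zip.InflaterInputStream {
        long compressed, uncompressed;

        CountingInflaterDemo(ByteArrayInputStream in) {
            super(in, new Inflater(), 512);
        }

        @Override
        protected void fill() throws IOException {
            super.fill();
            compressed += inf.getRemaining();   // bytes just handed to the inflater
        }

        @Override
        public int read(byte[] b, int off, int len) throws IOException {
            int n = super.read(b, off, len);
            if (n > 0) {
                uncompressed += n;
            }
            return n;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try (DeflaterOutputStream dos = new DeflaterOutputStream(bos)) {
                for (int i = 0; i < 100; i++) {
                    dos.write("hello hello hello ".getBytes());
                }
            }
            try (CountingInflaterDemo in = new CountingInflaterDemo(
                    new ByteArrayInputStream(bos.toByteArray()))) {
                byte[] buf = new byte[256];
                while (in.read(buf, 0, buf.length) != -1) {
                    // drain
                }
                System.out.println(in.compressed + " -> " + in.uncompressed);
            }
        }
    }
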
diff --git a/src/org/apache/commons/compress/archivers/zip/JarMarker.java b/src/org/apache/commons/compress/archivers/zip/JarMarker.java
deleted file mode 100644
index ad12f37e248..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/JarMarker.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.util.zip.ZipException;
-
-/**
- * If this extra field is added as the very first extra field of the
- * archive, Solaris will consider it an executable jar file.
- * @Immutable
- */
-public final class JarMarker implements ZipExtraField {
-
- private static final ZipShort ID = new ZipShort(0xCAFE);
- private static final ZipShort NULL = new ZipShort(0);
- private static final byte[] NO_BYTES = new byte[0];
- private static final JarMarker DEFAULT = new JarMarker();
-
- /** No-arg constructor */
- public JarMarker() {
- // empty
- }
-
- /**
- * Since JarMarker is stateless we can always use the same instance.
- * @return the DEFAULT jarmaker.
- */
- public static JarMarker getInstance() {
- return DEFAULT;
- }
-
- /**
- * The Header-ID.
- * @return the header id
- */
- @Override
- public ZipShort getHeaderId() {
- return ID;
- }
-
- /**
- * Length of the extra field in the local file data - without
- * Header-ID or length specifier.
- * @return 0
- */
- @Override
- public ZipShort getLocalFileDataLength() {
- return NULL;
- }
-
- /**
- * Length of the extra field in the central directory - without
- * Header-ID or length specifier.
- * @return 0
- */
- @Override
- public ZipShort getCentralDirectoryLength() {
- return NULL;
- }
-
- /**
- * The actual data to put into local file data - without Header-ID
- * or length specifier.
- * @return the data
- */
- @Override
- public byte[] getLocalFileDataData() {
- return NO_BYTES;
- }
-
- /**
- * The actual data to put central directory - without Header-ID or
- * length specifier.
- * @return the data
- */
- @Override
- public byte[] getCentralDirectoryData() {
- return NO_BYTES;
- }
-
- /**
- * Populate data from this array as if it was in local file data.
- * @param data an array of bytes
- * @param offset the start offset
- * @param length the number of bytes in the array from offset
- *
- * @throws ZipException on error
- */
- @Override
- public void parseFromLocalFileData(final byte[] data, final int offset, final int length)
- throws ZipException {
- if (length != 0) {
- throw new ZipException("JarMarker doesn't expect any data");
- }
- }
-
- /**
- * Doesn't do anything special since this class always uses the
- * same data in central directory and local file data.
- */
- @Override
- public void parseFromCentralDirectoryData(final byte[] buffer, final int offset,
- final int length)
- throws ZipException {
- parseFromLocalFileData(buffer, offset, length);
- }
-}
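
Review note: JarMarker carries no payload at all; on the wire it is just the 4-byte TLV header with id 0xCAFE and length 0, and it must be the first extra field for Solaris to honour it. Its bytes (demo class name invented):

    // The deleted JarMarker contributes only its 4-byte header:
    // little-endian id 0xCAFE followed by a zero length.
    public class JarMarkerBytesDemo {
        public static void main(String[] args) {
            int id = 0xCAFE, length = 0;
            byte[] wire = {
                (byte) (id & 0xFF), (byte) (id >> 8),          // little-endian id
                (byte) (length & 0xFF), (byte) (length >> 8)   // zero-length payload
            };
            for (byte b : wire) {
                System.out.printf("%02X ", b);                 // FE CA 00 00
            }
        }
    }
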
diff --git a/src/org/apache/commons/compress/archivers/zip/NioZipEncoding.java b/src/org/apache/commons/compress/archivers/zip/NioZipEncoding.java
deleted file mode 100644
index 0a7581acf0c..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/NioZipEncoding.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.CharBuffer;
-import java.nio.charset.Charset;
-import java.nio.charset.CharsetDecoder;
-import java.nio.charset.CharsetEncoder;
-import java.nio.charset.CoderResult;
-import java.nio.charset.CodingErrorAction;
-
-/**
- * A ZipEncoding, which uses a java.nio {@link
- * java.nio.charset.Charset Charset} to encode names.
- * <p>The methods of this class are reentrant.
- * @Immutable
- */
-class NioZipEncoding implements ZipEncoding, CharsetAccessor {
-
- private final Charset charset;
- private final boolean useReplacement;
- private static final char REPLACEMENT = '?';
- private static final byte[] REPLACEMENT_BYTES = { (byte) REPLACEMENT };
- private static final String REPLACEMENT_STRING = String.valueOf(REPLACEMENT);
- private static final char[] HEX_CHARS = new char[] {
- '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
- };
-
-
- /**
- * Construct an NioZipEncoding using the given charset.
- * @param charset The character set to use.
- * @param useReplacement should invalid characters be replaced, or reported.
- */
- NioZipEncoding(final Charset charset, boolean useReplacement) {
- this.charset = charset;
- this.useReplacement = useReplacement;
- }
-
- @Override
- public Charset getCharset() {
- return charset;
- }
-
- /**
- * @see ZipEncoding#canEncode(java.lang.String)
- */
- @Override
- public boolean canEncode(final String name) {
- final CharsetEncoder enc = newEncoder();
-
- return enc.canEncode(name);
- }
-
- /**
- * @see ZipEncoding#encode(java.lang.String)
- */
- @Override
- public ByteBuffer encode(final String name) {
- final CharsetEncoder enc = newEncoder();
-
- final CharBuffer cb = CharBuffer.wrap(name);
- CharBuffer tmp = null;
- ByteBuffer out = ByteBuffer.allocate(estimateInitialBufferSize(enc, cb.remaining()));
-
- while (cb.remaining() > 0) {
- final CoderResult res = enc.encode(cb, out, false);
-
- if (res.isUnmappable() || res.isMalformed()) {
-
- // write the unmappable characters in utf-16
- // pseudo-URL encoding style to ByteBuffer.
-
- int spaceForSurrogate = estimateIncrementalEncodingSize(enc, 6 * res.length());
- if (spaceForSurrogate > out.remaining()) {
- // if the destination buffer isn't over sized, assume that the presence of one
- // unmappable character makes it likely that there will be more. Find all the
- // un-encoded characters and allocate space based on those estimates.
- int charCount = 0;
- for (int i = cb.position() ; i < cb.limit(); i++) {
- charCount += !enc.canEncode(cb.get(i)) ? 6 : 1;
- }
- int totalExtraSpace = estimateIncrementalEncodingSize(enc, charCount);
- out = ZipEncodingHelper.growBufferBy(out, totalExtraSpace - out.remaining());
- }
- if (tmp == null) {
- tmp = CharBuffer.allocate(6);
- }
- for (int i = 0; i < res.length(); ++i) {
- out = encodeFully(enc, encodeSurrogate(tmp, cb.get()), out);
- }
-
- } else if (res.isOverflow()) {
- int increment = estimateIncrementalEncodingSize(enc, cb.remaining());
- out = ZipEncodingHelper.growBufferBy(out, increment);
- }
- }
- // tell the encoder we are done
- enc.encode(cb, out, true);
- // may have caused underflow, but that's been ignored traditionally
-
- out.limit(out.position());
- out.rewind();
- return out;
- }
-
- /**
- * @see
- * ZipEncoding#decode(byte[])
- */
- @Override
- public String decode(final byte[] data) throws IOException {
- return newDecoder()
- .decode(ByteBuffer.wrap(data)).toString();
- }
-
- private static ByteBuffer encodeFully(CharsetEncoder enc, CharBuffer cb, ByteBuffer out) {
- ByteBuffer o = out;
- while (cb.hasRemaining()) {
- CoderResult result = enc.encode(cb, o, false);
- if (result.isOverflow()) {
- int increment = estimateIncrementalEncodingSize(enc, cb.remaining());
- o = ZipEncodingHelper.growBufferBy(o, increment);
- }
- }
- return o;
- }
-
- private static CharBuffer encodeSurrogate(CharBuffer cb, char c) {
- cb.position(0).limit(6);
- cb.put('%');
- cb.put('U');
-
- cb.put(HEX_CHARS[(c >> 12) & 0x0f]);
- cb.put(HEX_CHARS[(c >> 8) & 0x0f]);
- cb.put(HEX_CHARS[(c >> 4) & 0x0f]);
- cb.put(HEX_CHARS[c & 0x0f]);
- cb.flip();
- return cb;
- }
-
- private CharsetEncoder newEncoder() {
- if (useReplacement) {
- return charset.newEncoder()
- .onMalformedInput(CodingErrorAction.REPLACE)
- .onUnmappableCharacter(CodingErrorAction.REPLACE)
- .replaceWith(REPLACEMENT_BYTES);
- } else {
- return charset.newEncoder()
- .onMalformedInput(CodingErrorAction.REPORT)
- .onUnmappableCharacter(CodingErrorAction.REPORT);
- }
- }
-
- private CharsetDecoder newDecoder() {
- if (!useReplacement) {
- return this.charset.newDecoder()
- .onMalformedInput(CodingErrorAction.REPORT)
- .onUnmappableCharacter(CodingErrorAction.REPORT);
- } else {
- return charset.newDecoder()
- .onMalformedInput(CodingErrorAction.REPLACE)
- .onUnmappableCharacter(CodingErrorAction.REPLACE)
- .replaceWith(REPLACEMENT_STRING);
- }
- }
-
- /**
- * Estimate the initial encoded size (in bytes) for a character buffer.
- *
- * The estimate assumes that one character uses the maximum length encoding,
- * whilst the rest use an average size encoding. This accounts for any BOM for UTF-16, at
- * the expense of a couple of extra bytes for UTF-8 encoded ASCII.
- *
- *
- * @param enc encoder to use for estimates
- * @param charCount number of characters in string
- * @return estimated size in bytes.
- */
- private static int estimateInitialBufferSize(CharsetEncoder enc, int charCount) {
- float first = enc.maxBytesPerChar();
- float rest = (charCount - 1) * enc.averageBytesPerChar();
- return (int) Math.ceil(first + rest);
- }
-
- /**
- * Estimate the size needed for remaining characters
- *
- * @param enc encoder to use for estimates
- * @param charCount number of characters remaining
- * @return estimated size in bytes.
- */
- private static int estimateIncrementalEncodingSize(CharsetEncoder enc, int charCount) {
- return (int) Math.ceil(charCount * enc.averageBytesPerChar());
- }
-
-}
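
Review note: with replacement disabled, encode() above escapes every unmappable character as a six-character %Uxxxx sequence built from HEX_CHARS. The escape in isolation (demo class name invented):

    // Standalone version of the %Uxxxx escape the deleted NioZipEncoding
    // emits for characters its charset cannot encode.
    public class SurrogateEscapeDemo {
        public static void main(String[] args) {
            char c = '\u20AC'; // EURO SIGN, unmappable in US-ASCII
            char[] hex = "0123456789ABCDEF".toCharArray();
            String escaped = "%U" + hex[(c >> 12) & 0x0F] + hex[(c >> 8) & 0x0F]
                    + hex[(c >> 4) & 0x0F] + hex[c & 0x0F];
            System.out.println(escaped); // %U20AC
        }
    }
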
diff --git a/src/org/apache/commons/compress/archivers/zip/PKWareExtraHeader.java b/src/org/apache/commons/compress/archivers/zip/PKWareExtraHeader.java
deleted file mode 100644
index 7177c8759b1..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/PKWareExtraHeader.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Base class for all PKWare strong crypto extra headers.
- *
- * <p>This base class acts as a marker so you know you can ignore all
- * extra fields that extend this class if you are not interested in
- * the meta data of PKWare strong encryption.
- *
- * Algorithm IDs - integer identifier of the encryption algorithm from
- * the following range
- *
- * <ul>
- * <li>0x6601 - DES</li>
- * <li>0x6602 - RC2 (version needed to extract < 5.2)</li>
- * <li>0x6603 - 3DES 168</li>
- * <li>0x6609 - 3DES 112</li>
- * <li>0x660E - AES 128</li>
- * <li>0x660F - AES 192</li>
- * <li>0x6610 - AES 256</li>
- * <li>0x6702 - RC2 (version needed to extract >= 5.2)</li>
- * <li>0x6720 - Blowfish</li>
- * <li>0x6721 - Twofish</li>
- * <li>0x6801 - RC4</li>
- * <li>0xFFFF - Unknown algorithm</li>
- * </ul>
- *
- * Hash Algorithms - integer identifier of the hash algorithm from the
- * following range
- *
- * <ul>
- * <li>0x0000 - none</li>
- * <li>0x0001 - CRC32</li>
- * <li>0x8003 - MD5</li>
- * <li>0x8004 - SHA1</li>
- * <li>0x8007 - RIPEMD160</li>
- * <li>0x800C - SHA256</li>
- * <li>0x800D - SHA384</li>
- * <li>0x800E - SHA512</li>
- * </ul>
- *
- * @since 1.11
- */
-public abstract class PKWareExtraHeader implements ZipExtraField {
-
- private final ZipShort headerId;
- /**
- * Extra field data in local file data - without Header-ID or length
- * specifier.
- */
- private byte[] localData;
- /**
- * Extra field data in central directory - without Header-ID or length
- * specifier.
- */
- private byte[] centralData;
-
- protected PKWareExtraHeader(final ZipShort headerId) {
- this.headerId = headerId;
- }
-
- /**
- * Get the header id.
- *
- * @return the header id
- */
- @Override
- public ZipShort getHeaderId() {
- return headerId;
- }
-
- /**
- * Set the extra field data in the local file data - without Header-ID or
- * length specifier.
- *
- * @param data
- * the field data to use
- */
- public void setLocalFileDataData(final byte[] data) {
- localData = ZipUtil.copy(data);
- }
-
- /**
- * Get the length of the local data.
- *
- * @return the length of the local data
- */
- @Override
- public ZipShort getLocalFileDataLength() {
- return new ZipShort(localData != null ? localData.length : 0);
- }
-
- /**
- * Get the local data.
- *
- * @return the local data
- */
- @Override
- public byte[] getLocalFileDataData() {
- return ZipUtil.copy(localData);
- }
-
- /**
- * Set the extra field data in central directory.
- *
- * @param data
- * the data to use
- */
- public void setCentralDirectoryData(final byte[] data) {
- centralData = ZipUtil.copy(data);
- }
-
- /**
- * Get the central data length. If there is no central data, get the local
- * file data length.
- *
- * @return the central data length
- */
- @Override
- public ZipShort getCentralDirectoryLength() {
- if (centralData != null) {
- return new ZipShort(centralData.length);
- }
- return getLocalFileDataLength();
- }
-
- /**
- * Get the central data.
- *
- * @return the central data if present, else return the local file data
- */
- @Override
- public byte[] getCentralDirectoryData() {
- if (centralData != null) {
- return ZipUtil.copy(centralData);
- }
- return getLocalFileDataData();
- }
-
- /**
- * @param data
- * the array of bytes.
- * @param offset
- * the source location in the data array.
- * @param length
- * the number of bytes to use in the data array.
- * @see ZipExtraField#parseFromLocalFileData(byte[], int, int)
- */
- @Override
- public void parseFromLocalFileData(final byte[] data, final int offset, final int length) {
- final byte[] tmp = new byte[length];
- System.arraycopy(data, offset, tmp, 0, length);
- setLocalFileDataData(tmp);
- }
-
- /**
- * @param data
- * the array of bytes.
- * @param offset
- * the source location in the data array.
- * @param length
- * the number of bytes to use in the data array.
- * @see ZipExtraField#parseFromCentralDirectoryData(byte[], int, int)
- */
- @Override
- public void parseFromCentralDirectoryData(final byte[] data, final int offset, final int length) {
- final byte[] tmp = new byte[length];
- System.arraycopy(data, offset, tmp, 0, length);
- setCentralDirectoryData(tmp);
- if (localData == null) {
- setLocalFileDataData(tmp);
- }
- }
-
- /**
- * Encryption algorithm.
- *
- * @since 1.11
- */
- public enum EncryptionAlgorithm {
- DES(0x6601),
- RC2pre52(0x6602),
- TripleDES168(0x6603),
- TripleDES192(0x6609),
- AES128(0x660E),
- AES192(0x660F),
- AES256(0x6610),
- RC2(0x6702),
- RC4(0x6801),
- UNKNOWN(0xFFFF);
-
- private final int code;
-
- private static final Map<Integer, EncryptionAlgorithm> codeToEnum;
-
- static {
- final Map<Integer, EncryptionAlgorithm> cte = new HashMap<>();
- for (final EncryptionAlgorithm method : values()) {
- cte.put(method.getCode(), method);
- }
- codeToEnum = Collections.unmodifiableMap(cte);
- }
-
- /**
- * private constructor for enum style class.
- */
- EncryptionAlgorithm(final int code) {
- this.code = code;
- }
-
- /**
- * the algorithm id.
- *
- * @return the PKWare AlgorithmId
- */
- public int getCode() {
- return code;
- }
-
- /**
- * Returns the EncryptionAlgorithm for the given code or null if the
- * method is not known.
- * @param code the code of the algorithm
- * @return the EncryptionAlgorithm for the given code or null
- * if the method is not known
- */
- public static EncryptionAlgorithm getAlgorithmByCode(final int code) {
- return codeToEnum.get(code);
- }
- }
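A usage sketch for the lookup (values taken from the id table above; not part of the diff):

// 0x6610 maps to AES256 per the algorithm id table
EncryptionAlgorithm aes = EncryptionAlgorithm.getAlgorithmByCode(0x6610);
// an unlisted code yields null, so callers must handle the unknown case
EncryptionAlgorithm other = EncryptionAlgorithm.getAlgorithmByCode(0x1234);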
-
- /**
- * Hash Algorithm
- *
- * @since 1.11
- */
- public enum HashAlgorithm {
- NONE(0),
- CRC32(1),
- MD5(0x8003),
- SHA1(0x8004),
- RIPEND160(0x8007),
- SHA256(0x800C),
- SHA384(0x800D),
- SHA512(0x800E);
-
- private final int code;
-
- private static final Map<Integer, HashAlgorithm> codeToEnum;
-
- static {
- final Map<Integer, HashAlgorithm> cte = new HashMap<>();
- for (final HashAlgorithm method : values()) {
- cte.put(method.getCode(), method);
- }
- codeToEnum = Collections.unmodifiableMap(cte);
- }
-
- /**
- * private constructor for enum style class.
- */
- HashAlgorithm(final int code) {
- this.code = code;
- }
-
- /**
- * the hash algorithm ID.
- *
- * @return the PKWare hashAlg
- */
- public int getCode() {
- return code;
- }
-
- /**
- * Returns the HashAlgorithm for the given code or null if the method is
- * not known.
- * @param code the code of the algorithm
- * @return the HashAlgorithm for the given code or null
- * if the method is not known
- */
- public static HashAlgorithm getAlgorithmByCode(final int code) {
- return codeToEnum.get(code);
- }
- }
-}
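The local/central fallback contract above can be exercised like this (a sketch; X0017_StrongEncryptionHeader is one concrete subclass shipped with Commons Compress, and the byte values are arbitrary):

byte[] central = {1, 2, 3};
PKWareExtraHeader header = new X0017_StrongEncryptionHeader();
header.parseFromCentralDirectoryData(central, 0, central.length);
// no local data was ever set, so both views now mirror the central copy
assert java.util.Arrays.equals(header.getCentralDirectoryData(), central);
assert java.util.Arrays.equals(header.getLocalFileDataData(), central);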
diff --git a/src/org/apache/commons/compress/archivers/zip/ParallelScatterZipCreator.java b/src/org/apache/commons/compress/archivers/zip/ParallelScatterZipCreator.java
deleted file mode 100644
index a381d0a285e..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ParallelScatterZipCreator.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import org.apache.commons.compress.parallel.FileBasedScatterGatherBackingStore;
-import org.apache.commons.compress.parallel.InputStreamSupplier;
-import org.apache.commons.compress.parallel.ScatterGatherBackingStore;
-import org.apache.commons.compress.parallel.ScatterGatherBackingStoreSupplier;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.zip.Deflater;
-
-import static java.util.Collections.synchronizedList;
-import static org.apache.commons.compress.archivers.zip.ZipArchiveEntryRequest.createZipArchiveEntryRequest;
-
-/**
- * Creates a zip in parallel by using multiple threadlocal {@link ScatterZipOutputStream} instances.
- *
- * Note that this class generally makes no guarantees about the order of things written to
- * the output file. Things that need to come in a specific order (manifests, directories)
- * must be handled by the client of this class, usually by writing these things to the
- * {@link ZipArchiveOutputStream} before calling {@link #writeTo writeTo} on this class.
- *
- * The client can supply an {@link java.util.concurrent.ExecutorService}, but for reasons of
- * memory model consistency, this will be shut down by this class prior to completion.
- *
- * @since 1.10
- */
-public class ParallelScatterZipCreator {
- private final List<ScatterZipOutputStream> streams = synchronizedList(new ArrayList<ScatterZipOutputStream>());
- private final ExecutorService es;
- private final ScatterGatherBackingStoreSupplier backingStoreSupplier;
- private final List<Future<Object>> futures = new ArrayList<>();
-
- private final long startedAt = System.currentTimeMillis();
- private long compressionDoneAt = 0;
- private long scatterDoneAt;
-
- private static class DefaultBackingStoreSupplier implements ScatterGatherBackingStoreSupplier {
- final AtomicInteger storeNum = new AtomicInteger(0);
-
- @Override
- public ScatterGatherBackingStore get() throws IOException {
- final File tempFile = File.createTempFile("parallelscatter", "n" + storeNum.incrementAndGet());
- return new FileBasedScatterGatherBackingStore(tempFile);
- }
- }
-
- private ScatterZipOutputStream createDeferred(final ScatterGatherBackingStoreSupplier scatterGatherBackingStoreSupplier)
- throws IOException {
- final ScatterGatherBackingStore bs = scatterGatherBackingStoreSupplier.get();
- // lifecycle is bound to the ScatterZipOutputStream returned
- final StreamCompressor sc = StreamCompressor.create(Deflater.DEFAULT_COMPRESSION, bs); //NOSONAR
- return new ScatterZipOutputStream(bs, sc);
- }
-
- private final ThreadLocal<ScatterZipOutputStream> tlScatterStreams = new ThreadLocal<ScatterZipOutputStream>() {
- @Override
- protected ScatterZipOutputStream initialValue() {
- try {
- final ScatterZipOutputStream scatterStream = createDeferred(backingStoreSupplier);
- streams.add(scatterStream);
- return scatterStream;
- } catch (final IOException e) {
- throw new RuntimeException(e); //NOSONAR
- }
- }
- };
-
- /**
- * Create a ParallelScatterZipCreator with the default number of threads: the number of available
- * processors, as defined by {@link java.lang.Runtime#availableProcessors}
- */
- public ParallelScatterZipCreator() {
- this(Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()));
- }
-
- /**
- * Create a ParallelScatterZipCreator
- *
- * @param executorService The executorService to use for parallel scheduling. For technical reasons,
- * this will be shut down by this class.
- */
- public ParallelScatterZipCreator(final ExecutorService executorService) {
- this(executorService, new DefaultBackingStoreSupplier());
- }
-
- /**
- * Create a ParallelScatterZipCreator
- *
- * @param executorService The executorService to use. For technical reasons, this will be shut down
- * by this class.
- * @param backingStoreSupplier The supplier of backing store which shall be used
- */
- public ParallelScatterZipCreator(final ExecutorService executorService,
- final ScatterGatherBackingStoreSupplier backingStoreSupplier) {
- this.backingStoreSupplier = backingStoreSupplier;
- es = executorService;
- }
-
- /**
- * Adds an archive entry to this archive.
- *
- * This method is expected to be called from a single client thread
- *
- *
- * @param zipArchiveEntry The entry to add.
- * @param source The source input stream supplier
- */
-
- public void addArchiveEntry(final ZipArchiveEntry zipArchiveEntry, final InputStreamSupplier source) {
- submit(createCallable(zipArchiveEntry, source));
- }
-
- /**
- * Adds an archive entry to this archive.
- *
- * This method is expected to be called from a single client thread
- *
- *
- * @param zipArchiveEntryRequestSupplier Should supply the entry to be added.
- * @since 1.13
- */
- public void addArchiveEntry(final ZipArchiveEntryRequestSupplier zipArchiveEntryRequestSupplier) {
- submit(createCallable(zipArchiveEntryRequestSupplier));
- }
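Putting the entry points together, a typical write path looks like this (a minimal sketch, not part of the diff; `payload` is an assumed byte[], imports and error handling are omitted):

ParallelScatterZipCreator creator = new ParallelScatterZipCreator();
ZipArchiveEntry entry = new ZipArchiveEntry("data.bin");
entry.setMethod(ZipArchiveEntry.DEFLATED); // a compression method must be set before submitting
creator.addArchiveEntry(entry, () -> new ByteArrayInputStream(payload));
try (ZipArchiveOutputStream target = new ZipArchiveOutputStream(new File("out.zip"))) {
    creator.writeTo(target); // blocks until all parallel compression has finished
}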
-
- /**
- * Submit a callable for compression.
- *
- * @see ParallelScatterZipCreator#createCallable for details of if/when to use this.
- *
- * @param callable The callable to run, created by {@link #createCallable createCallable}, possibly wrapped by caller.
- */
- public final void submit(final Callable<Object> callable) {
- futures.add(es.submit(callable));
- }
- *
- *   - Rewinds the stream to position at the data descriptor.
- *   - Reads the data descriptor.
- *
- * After calling this method the entry should know its size,
- * the entry's data is cached and the stream is positioned at the
- * next local file or central directory header.
- */
- private void readStoredEntry() throws IOException {
- final ByteArrayOutputStream bos = new ByteArrayOutputStream();
- int off = 0;
- boolean done = false;
-
- // length of DD without signature
- final int ddLen = current.usesZip64 ? WORD + 2 * DWORD : 3 * WORD;
-
- while (!done) {
- final int r = in.read(buf.array(), off, ZipArchiveOutputStream.BUFFER_SIZE - off);
- if (r <= 0) {
- // read the whole archive without ever finding a
- // central directory
- throw new IOException("Truncated ZIP file");
- }
- if (r + off < 4) {
- // buffer too small to check for a signature, loop
- off += r;
- continue;
- }
-
- done = bufferContainsSignature(bos, off, r, ddLen);
- if (!done) {
- off = cacheBytesRead(bos, off, r, ddLen);
- }
- }
-
- final byte[] b = bos.toByteArray();
- lastStoredEntry = new ByteArrayInputStream(b);
- }
-
- private static final byte[] LFH = ZipLong.LFH_SIG.getBytes();
- private static final byte[] CFH = ZipLong.CFH_SIG.getBytes();
- private static final byte[] DD = ZipLong.DD_SIG.getBytes();
-
- /**
- * Checks whether the current buffer contains the signature of a
- * "data descriptor", "local file header" or
- * "central directory entry".
- *
- *
- * If it contains such a signature, reads the data descriptor
- * and positions the stream right after the data descriptor.
- */
- private boolean bufferContainsSignature(final ByteArrayOutputStream bos, final int offset, final int lastRead, final int expectedDDLen)
- throws IOException {
-
- boolean done = false;
- int readTooMuch = 0;
- for (int i = 0; !done && i < offset + lastRead - 4; i++) {
- if (buf.array()[i] == LFH[0] && buf.array()[i + 1] == LFH[1]) {
- if ((buf.array()[i + 2] == LFH[2] && buf.array()[i + 3] == LFH[3])
- || (buf.array()[i + 2] == CFH[2] && buf.array()[i + 3] == CFH[3])) {
- // found a LFH or CFH:
- readTooMuch = offset + lastRead - i - expectedDDLen;
- done = true;
- }
- else if (buf.array()[i + 2] == DD[2] && buf.array()[i + 3] == DD[3]) {
- // found DD:
- readTooMuch = offset + lastRead - i;
- done = true;
- }
- if (done) {
- // * push back bytes read in excess as well as the data
- // descriptor
- // * copy the remaining bytes to cache
- // * read data descriptor
- pushback(buf.array(), offset + lastRead - readTooMuch, readTooMuch);
- bos.write(buf.array(), 0, i);
- readDataDescriptor();
- }
- }
- }
- return done;
- }
-
- /**
- * If the last read bytes could hold a data descriptor and an
- * incomplete signature then save the last bytes to the front of
- * the buffer and cache everything in front of the potential data
- * descriptor into the given ByteArrayOutputStream.
- *
- *
- * Data descriptor plus incomplete signature (3 bytes in the
- * worst case) can be 20 bytes max.
- */
- private int cacheBytesRead(final ByteArrayOutputStream bos, int offset, final int lastRead, final int expectedDDLen) {
- final int cacheable = offset + lastRead - expectedDDLen - 3;
- if (cacheable > 0) {
- bos.write(buf.array(), 0, cacheable);
- System.arraycopy(buf.array(), cacheable, buf.array(), 0, expectedDDLen + 3);
- offset = expectedDDLen + 3;
- } else {
- offset += lastRead;
- }
- return offset;
- }
-
- private void pushback(final byte[] buf, final int offset, final int length) throws IOException {
- ((PushbackInputStream) in).unread(buf, offset, length);
- pushedBackBytes(length);
- }
-
- // End of Central Directory Record
- // end of central dir signature WORD
- // number of this disk SHORT
- // number of the disk with the
- // start of the central directory SHORT
- // total number of entries in the
- // central directory on this disk SHORT
- // total number of entries in
- // the central directory SHORT
- // size of the central directory WORD
- // offset of start of central
- // directory with respect to
- // the starting disk number WORD
- // .ZIP file comment length SHORT
- // .ZIP file comment up to 64KB
- //
-
- /**
- * Reads the stream until it finds the "End of central directory
- * record" and consumes it as well.
- */
- private void skipRemainderOfArchive() throws IOException {
- // skip over central directory. One LFH has been read too much
- // already. The calculation discounts file names and extra
- // data so it will be too short.
- realSkip((long) entriesRead * CFH_LEN - LFH_LEN);
- findEocdRecord();
- realSkip((long) ZipFile.MIN_EOCD_SIZE - WORD /* signature */ - SHORT /* comment len */);
- readFully(shortBuf);
- // file comment
- realSkip(ZipShort.getValue(shortBuf));
- }
-
- /**
- * Reads forward until the signature of the "End of central
- * directory" record is found.
- */
- private void findEocdRecord() throws IOException {
- int currentByte = -1;
- boolean skipReadCall = false;
- while (skipReadCall || (currentByte = readOneByte()) > -1) {
- skipReadCall = false;
- if (!isFirstByteOfEocdSig(currentByte)) {
- continue;
- }
- currentByte = readOneByte();
- if (currentByte != ZipArchiveOutputStream.EOCD_SIG[1]) {
- if (currentByte == -1) {
- break;
- }
- skipReadCall = isFirstByteOfEocdSig(currentByte);
- continue;
- }
- currentByte = readOneByte();
- if (currentByte != ZipArchiveOutputStream.EOCD_SIG[2]) {
- if (currentByte == -1) {
- break;
- }
- skipReadCall = isFirstByteOfEocdSig(currentByte);
- continue;
- }
- currentByte = readOneByte();
- if (currentByte == -1
- || currentByte == ZipArchiveOutputStream.EOCD_SIG[3]) {
- break;
- }
- skipReadCall = isFirstByteOfEocdSig(currentByte);
- }
- }
-
- /**
- * Skips bytes by reading from the underlying stream rather than
- * the (potentially inflating) archive stream - which {@link
- * #skip} would do.
- *
- * Also updates bytes-read counter.
- */
- private void realSkip(final long value) throws IOException {
- if (value >= 0) {
- long skipped = 0;
- while (skipped < value) {
- final long rem = value - skipped;
- final int x = in.read(skipBuf, 0, (int) (skipBuf.length > rem ? rem : skipBuf.length));
- if (x == -1) {
- return;
- }
- count(x);
- skipped += x;
- }
- return;
- }
- throw new IllegalArgumentException();
- }
-
- /**
- * Reads bytes by reading from the underlying stream rather than
- * the (potentially inflating) archive stream - which {@link #read} would do.
- *
- * Also updates bytes-read counter.
- */
- private int readOneByte() throws IOException {
- final int b = in.read();
- if (b != -1) {
- count(1);
- }
- return b;
- }
-
- private boolean isFirstByteOfEocdSig(final int b) {
- return b == ZipArchiveOutputStream.EOCD_SIG[0];
- }
-
- /**
- * Structure collecting information for the entry that is
- * currently being read.
- */
- private static final class CurrentEntry {
-
- /**
- * Current ZIP entry.
- */
- private final ZipArchiveEntry entry = new ZipArchiveEntry();
-
- /**
- * Does the entry use a data descriptor?
- */
- private boolean hasDataDescriptor;
-
- /**
- * Does the entry have a ZIP64 extended information extra field.
- */
- private boolean usesZip64;
-
- /**
- * Number of bytes of entry content read by the client if the
- * entry is STORED.
- */
- private long bytesRead;
-
- /**
- * Number of bytes of entry content read from the stream.
- *
- *
- * This may be more than the actual entry's length as some
- * stuff gets buffered up and needs to be pushed back when the
- * end of the entry has been reached.
- */
- private long bytesReadFromStream;
-
- /**
- * The checksum calculated as the current entry is read.
- */
- private final CRC32 crc = new CRC32();
-
- /**
- * The input stream decompressing the data for shrunk and imploded entries.
- */
- private InputStream in;
- }
-
- /**
- * Bounded input stream adapted from commons-io
- */
- private class BoundedInputStream extends InputStream {
-
- /** the wrapped input stream */
- private final InputStream in;
-
- /** the max length to provide */
- private final long max;
-
- /** the number of bytes already returned */
- private long pos = 0;
-
- /**
- * Creates a new BoundedInputStream that wraps the given input
- * stream and limits it to a certain size.
- *
- * @param in The wrapped input stream
- * @param size The maximum number of bytes to return
- */
- public BoundedInputStream(final InputStream in, final long size) {
- this.max = size;
- this.in = in;
- }
-
- @Override
- public int read() throws IOException {
- if (max >= 0 && pos >= max) {
- return -1;
- }
- final int result = in.read();
- pos++;
- count(1);
- current.bytesReadFromStream++;
- return result;
- }
-
- @Override
- public int read(final byte[] b) throws IOException {
- return this.read(b, 0, b.length);
- }
-
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- if (max >= 0 && pos >= max) {
- return -1;
- }
- final long maxRead = max >= 0 ? Math.min(len, max - pos) : len;
- final int bytesRead = in.read(b, off, (int) maxRead);
-
- if (bytesRead == -1) {
- return -1;
- }
-
- pos += bytesRead;
- count(bytesRead);
- current.bytesReadFromStream += bytesRead;
- return bytesRead;
- }
-
- @Override
- public long skip(final long n) throws IOException {
- final long toSkip = max >= 0 ? Math.min(n, max - pos) : n;
- final long skippedBytes = IOUtils.skip(in, toSkip);
- pos += skippedBytes;
- return skippedBytes;
- }
-
- @Override
- public int available() throws IOException {
- if (max >= 0 && pos >= max) {
- return 0;
- }
- return in.available();
- }
- }
-}
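The bounding logic of the private inner class above can be restated standalone (a sketch of the pattern only, not the class itself; it mirrors the read(byte[], int, int) branch):

/** Reads at most max - pos further bytes from in, mirroring BoundedInputStream above. */
static int boundedRead(java.io.InputStream in, byte[] b, int off, int len,
                       long max, long pos) throws java.io.IOException {
    if (max >= 0 && pos >= max) {
        return -1; // limit reached: report EOF even if the stream has more
    }
    long maxRead = max >= 0 ? Math.min(len, max - pos) : len;
    return in.read(b, off, (int) maxRead); // never read past the configured limit
}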
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipArchiveOutputStream.java b/src/org/apache/commons/compress/archivers/zip/ZipArchiveOutputStream.java
deleted file mode 100644
index 6a8cacc02b9..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipArchiveOutputStream.java
+++ /dev/null
@@ -1,1687 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
-import java.util.Calendar;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.zip.Deflater;
-import java.util.zip.ZipException;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.utils.IOUtils;
-
-import static org.apache.commons.compress.archivers.zip.ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.DEFLATE_MIN_VERSION;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.INITIAL_VERSION;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC_SHORT;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MIN_VERSION;
-import static org.apache.commons.compress.archivers.zip.ZipLong.putLong;
-import static org.apache.commons.compress.archivers.zip.ZipShort.putShort;
-
-/**
- * Reimplementation of {@link java.util.zip.ZipOutputStream
- * java.util.zip.ZipOutputStream} that does handle the extended
- * functionality of this package, especially internal/external file
- * attributes and extra fields with different layouts for local file
- * data and central directory entries.
- *
- *
- * This class will try to use {@link
- * java.nio.channels.SeekableByteChannel} when it knows that the
- * output is going to go to a file.
- *
- *
- * If SeekableByteChannel cannot be used, this implementation will use
- * a Data Descriptor to store size and CRC information for {@link
- * #DEFLATED DEFLATED} entries, which means you don't need to
- * calculate them yourself. Unfortunately this is not possible for
- * the {@link #STORED STORED} method, where setting the CRC and
- * uncompressed size information is required before {@link
- * #putArchiveEntry(ArchiveEntry)} can be called.
- *
- *
- * As of Apache Commons Compress 1.3 it transparently supports Zip64
- * extensions and thus individual entries and archives larger than 4
- * GB or with more than 65536 entries in most cases, but explicit
- * control is provided via {@link #setUseZip64}. If the stream cannot
- * use SeekableByteChannel and you try to write a ZipArchiveEntry of
- * unknown size, then Zip64 extensions will be disabled by default.
- *
- * @NotThreadSafe
- */
-public class ZipArchiveOutputStream extends ArchiveOutputStream {
-
- static final int BUFFER_SIZE = 512;
- private static final int LFH_SIG_OFFSET = 0;
- private static final int LFH_VERSION_NEEDED_OFFSET = 4;
- private static final int LFH_GPB_OFFSET = 6;
- private static final int LFH_METHOD_OFFSET = 8;
- private static final int LFH_TIME_OFFSET = 10;
- private static final int LFH_CRC_OFFSET = 14;
- private static final int LFH_COMPRESSED_SIZE_OFFSET = 18;
- private static final int LFH_ORIGINAL_SIZE_OFFSET = 22;
- private static final int LFH_FILENAME_LENGTH_OFFSET = 26;
- private static final int LFH_EXTRA_LENGTH_OFFSET = 28;
- private static final int LFH_FILENAME_OFFSET = 30;
- private static final int CFH_SIG_OFFSET = 0;
- private static final int CFH_VERSION_MADE_BY_OFFSET = 4;
- private static final int CFH_VERSION_NEEDED_OFFSET = 6;
- private static final int CFH_GPB_OFFSET = 8;
- private static final int CFH_METHOD_OFFSET = 10;
- private static final int CFH_TIME_OFFSET = 12;
- private static final int CFH_CRC_OFFSET = 16;
- private static final int CFH_COMPRESSED_SIZE_OFFSET = 20;
- private static final int CFH_ORIGINAL_SIZE_OFFSET = 24;
- private static final int CFH_FILENAME_LENGTH_OFFSET = 28;
- private static final int CFH_EXTRA_LENGTH_OFFSET = 30;
- private static final int CFH_COMMENT_LENGTH_OFFSET = 32;
- private static final int CFH_DISK_NUMBER_OFFSET = 34;
- private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36;
- private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38;
- private static final int CFH_LFH_OFFSET = 42;
- private static final int CFH_FILENAME_OFFSET = 46;
-
- /** indicates if this archive is finished. protected for use in Jar implementation */
- protected boolean finished = false;
-
- /**
- * Compression method for deflated entries.
- */
- public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;
-
- /**
- * Default compression level for deflated entries.
- */
- public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;
-
- /**
- * Compression method for stored entries.
- */
- public static final int STORED = java.util.zip.ZipEntry.STORED;
-
- /**
- * default encoding for file names and comment.
- */
- static final String DEFAULT_ENCODING = ZipEncodingHelper.UTF8;
-
- /**
- * General purpose flag, which indicates that filenames are
- * written in UTF-8.
- * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
- */
- @Deprecated
- public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;
-
- private static final byte[] EMPTY = new byte[0];
-
- /**
- * Current entry.
- */
- private CurrentEntry entry;
-
- /**
- * The file comment.
- */
- private String comment = "";
-
- /**
- * Compression level for next entry.
- */
- private int level = DEFAULT_COMPRESSION;
-
- /**
- * Has the compression level changed when compared to the last
- * entry?
- */
- private boolean hasCompressionLevelChanged = false;
-
- /**
- * Default compression method for next entry.
- */
- private int method = java.util.zip.ZipEntry.DEFLATED;
-
- /**
- * List of ZipArchiveEntries written so far.
- */
- private final List entries =
- new LinkedList<>();
-
- private final StreamCompressor streamCompressor;
-
- /**
- * Start of central directory.
- */
- private long cdOffset = 0;
-
- /**
- * Length of central directory.
- */
- private long cdLength = 0;
-
- /**
- * Helper, a 0 as ZipShort.
- */
- private static final byte[] ZERO = {0, 0};
-
- /**
- * Helper, a 0 as ZipLong.
- */
- private static final byte[] LZERO = {0, 0, 0, 0};
-
- private static final byte[] ONE = ZipLong.getBytes(1L);
-
- /**
- * Holds some book-keeping data for each entry.
- */
- private final Map metaData =
- new HashMap<>();
-
- /**
- * The encoding to use for filenames and the file comment.
- *
- *
- */
- private String encoding = DEFAULT_ENCODING;
-
- /**
- * The zip encoding to use for filenames and the file comment.
- *
- * This field is of internal use and will be set in {@link
- * #setEncoding(String)}.
- */
- private ZipEncoding zipEncoding =
- ZipEncodingHelper.getZipEncoding(DEFAULT_ENCODING);
-
-
- /**
- * This Deflater object is used for output.
- *
- */
- protected final Deflater def;
- /**
- * Optional random access output.
- */
- private final SeekableByteChannel channel;
-
- private final OutputStream out;
-
- /**
- * whether to use the general purpose bit flag when writing UTF-8
- * filenames or not.
- */
- private boolean useUTF8Flag = true;
-
- /**
- * Whether to encode non-encodable file names as UTF-8.
- */
- private boolean fallbackToUTF8 = false;
-
- /**
- * whether to create UnicodePathExtraField-s for each entry.
- */
- private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;
-
- /**
- * Whether anything inside this archive has used a ZIP64 feature.
- *
- * @since 1.3
- */
- private boolean hasUsedZip64 = false;
-
- private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;
-
- private final byte[] copyBuffer = new byte[32768];
- private final Calendar calendarInstance = Calendar.getInstance();
-
- /**
- * Creates a new ZIP OutputStream filtering the underlying stream.
- * @param out the outputstream to zip
- */
- public ZipArchiveOutputStream(final OutputStream out) {
- this.out = out;
- this.channel = null;
- def = new Deflater(level, true);
- streamCompressor = StreamCompressor.create(out, def);
- }
-
- /**
- * Creates a new ZIP OutputStream writing to a File. Will use
- * random access if possible.
- * @param file the file to zip to
- * @throws IOException on error
- */
- public ZipArchiveOutputStream(final File file) throws IOException {
- def = new Deflater(level, true);
- OutputStream o = null;
- SeekableByteChannel _channel = null;
- StreamCompressor _streamCompressor = null;
- try {
- _channel = Files.newByteChannel(file.toPath(),
- EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE,
- StandardOpenOption.READ,
- StandardOpenOption.TRUNCATE_EXISTING));
- // will never get opened properly when an exception is thrown so doesn't need to get closed
- _streamCompressor = StreamCompressor.create(_channel, def); //NOSONAR
- } catch (final IOException e) {
- IOUtils.closeQuietly(_channel);
- _channel = null;
- o = new FileOutputStream(file);
- _streamCompressor = StreamCompressor.create(o, def);
- }
- out = o;
- channel = _channel;
- streamCompressor = _streamCompressor;
- }
-
- /**
- * Creates a new ZIP OutputStream writing to a SeekableByteChannel.
- *
- *
- * {@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to write to an in-memory archive using random
- * access.
- *
- * @param channel the channel to zip to
- * @throws IOException on error
- * @since 1.13
- */
- public ZipArchiveOutputStream(SeekableByteChannel channel) throws IOException {
- this.channel = channel;
- def = new Deflater(level, true);
- streamCompressor = StreamCompressor.create(channel, def);
- out = null;
- }
-
- /**
- * This method indicates whether this archive is writing to a
- * seekable stream (i.e., to a random access file).
- *
- *
- * For seekable streams, you don't need to calculate the CRC or
- * uncompressed size for {@link #STORED} entries before
- * invoking {@link #putArchiveEntry(ArchiveEntry)}.
- * @return true if seekable
- */
- public boolean isSeekable() {
- return channel != null;
- }
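For non-seekable output, the STORED constraints described above mean the caller computes size and CRC first (a sketch, not part of the diff; `data` is an assumed byte[] and `zipOut` an assumed ZipArchiveOutputStream over a plain OutputStream):

java.util.zip.CRC32 crc = new java.util.zip.CRC32();
crc.update(data);
ZipArchiveEntry stored = new ZipArchiveEntry("raw.bin");
stored.setMethod(ZipArchiveOutputStream.STORED);
stored.setSize(data.length);   // required up-front when the output is not seekable
stored.setCrc(crc.getValue()); // likewise the checksum
zipOut.putArchiveEntry(stored);
zipOut.write(data);
zipOut.closeArchiveEntry();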
-
- /**
- * The encoding to use for filenames and the file comment.
- *
- *
- * @param encoding the encoding to use for file names, use null
- * for the platform's default encoding
- */
- public void setEncoding(final String encoding) {
- this.encoding = encoding;
- this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
- if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) {
- useUTF8Flag = false;
- }
- }
-
- /**
- * The encoding to use for filenames and the file comment.
- *
- * @return null if using the platform's default character encoding.
- */
- public String getEncoding() {
- return encoding;
- }
-
- /**
- * Whether to set the language encoding flag if the file name
- * encoding is UTF-8.
- *
- *
- * Defaults to true.
- *
- * @param b whether to set the language encoding flag if the file
- * name encoding is UTF-8
- */
- public void setUseLanguageEncodingFlag(final boolean b) {
- useUTF8Flag = b && ZipEncodingHelper.isUTF8(encoding);
- }
-
- /**
- * Whether to create Unicode Extra Fields.
- *
- *
- * Defaults to NEVER.
- *
- * @param b whether to create Unicode Extra Fields.
- */
- public void setCreateUnicodeExtraFields(final UnicodeExtraFieldPolicy b) {
- createUnicodeExtraFields = b;
- }
-
- /**
- * Whether to fall back to UTF-8 and the language encoding flag if
- * the file name cannot be encoded using the specified encoding.
- *
- * Defaults to false.
- *
- * @param b whether to fall back to UTF-8 and the language encoding
- * flag if the file name cannot be encoded using the specified
- * encoding.
- */
- public void setFallbackToUTF8(final boolean b) {
- fallbackToUTF8 = b;
- }
-
- /**
- * Whether Zip64 extensions will be used.
- *
- *
- * When setting the mode to {@link Zip64Mode#Never Never},
- * {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link
- * #finish} or {@link #close} may throw a {@link
- * Zip64RequiredException} if the entry's size or the total size
- * of the archive exceeds 4GB or there are more than 65536 entries
- * inside the archive. Any archive created in this mode will be
- * readable by implementations that don't support Zip64.
- *
- *
- * When setting the mode to {@link Zip64Mode#Always Always},
- * Zip64 extensions will be used for all entries. Any archive
- * created in this mode may be unreadable by implementations that
- * don't support Zip64 even if all its contents would be.
- *
- *
- * When setting the mode to {@link Zip64Mode#AsNeeded
- * AsNeeded}, Zip64 extensions will transparently be used for
- * those entries that require them. This mode can only be used if
- * the uncompressed size of the {@link ZipArchiveEntry} is known
- * when calling {@link #putArchiveEntry} or the archive is written
- * to a seekable output (i.e. you have used the {@link
- * #ZipArchiveOutputStream(java.io.File) File-arg constructor}) -
- * this mode is not valid when the output stream is not seekable
- * and the uncompressed size is unknown when {@link
- * #putArchiveEntry} is called.
- *
- *
- * If no entry inside the resulting archive requires Zip64
- * extensions then {@link Zip64Mode#Never Never} will create the
- * smallest archive. {@link Zip64Mode#AsNeeded AsNeeded} will
- * create a slightly bigger archive if the uncompressed size of
- * any entry has initially been unknown and create an archive
- * identical to {@link Zip64Mode#Never Never} otherwise. {@link
- * Zip64Mode#Always Always} will create an archive that is at
- * least 24 bytes per entry bigger than the one {@link
- * Zip64Mode#Never Never} would create.
- *
- *
- * Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless
- * {@link #putArchiveEntry} is called with an entry of unknown
- * size and data is written to a non-seekable stream - in this
- * case the default is {@link Zip64Mode#Never Never}.
- *
- * @since 1.3
- * @param mode Whether Zip64 extensions will be used.
- */
- public void setUseZip64(final Zip64Mode mode) {
- zip64Mode = mode;
- }
-
- /**
- * {@inheritDoc}
- * @throws Zip64RequiredException if the archive's size exceeds 4
- * GByte or there are more than 65535 entries inside the archive
- * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
- */
- @Override
- public void finish() throws IOException {
- if (finished) {
- throw new IOException("This archive has already been finished");
- }
-
- if (entry != null) {
- throw new IOException("This archive contains unclosed entries.");
- }
-
- cdOffset = streamCompressor.getTotalBytesWritten();
- writeCentralDirectoryInChunks();
-
- cdLength = streamCompressor.getTotalBytesWritten() - cdOffset;
- writeZip64CentralDirectory();
- writeCentralDirectoryEnd();
- metaData.clear();
- entries.clear();
- streamCompressor.close();
- finished = true;
- }
-
- private void writeCentralDirectoryInChunks() throws IOException {
- final int NUM_PER_WRITE = 1000;
- final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE);
- int count = 0;
- for (final ZipArchiveEntry ze : entries) {
- byteArrayOutputStream.write(createCentralFileHeader(ze));
- if (++count > NUM_PER_WRITE){
- writeCounted(byteArrayOutputStream.toByteArray());
- byteArrayOutputStream.reset();
- count = 0;
- }
- }
- writeCounted(byteArrayOutputStream.toByteArray());
- }
-
- /**
- * Writes all necessary data for this entry.
- * @throws IOException on error
- * @throws Zip64RequiredException if the entry's uncompressed or
- * compressed size exceeds 4 GByte and {@link #setUseZip64}
- * is {@link Zip64Mode#Never}.
- */
- @Override
- public void closeArchiveEntry() throws IOException {
- preClose();
-
- flushDeflater();
-
- final long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart;
- final long realCrc = streamCompressor.getCrc32();
- entry.bytesRead = streamCompressor.getBytesRead();
- final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
- final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode);
- closeEntry(actuallyNeedsZip64, false);
- streamCompressor.reset();
- }
-
- /**
- * Writes all necessary data for this entry.
- *
- * @param phased This entry is the second phase of a two-phase zip creation; size, compressed size and crc
- * are already known in the ZipArchiveEntry
- * @throws IOException on error
- * @throws Zip64RequiredException if the entry's uncompressed or
- * compressed size exceeds 4 GByte and {@link #setUseZip64}
- * is {@link Zip64Mode#Never}.
- */
- private void closeCopiedEntry(final boolean phased) throws IOException {
- preClose();
- entry.bytesRead = entry.entry.getSize();
- final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
- final boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode);
- closeEntry(actuallyNeedsZip64, phased);
- }
-
- private void closeEntry(final boolean actuallyNeedsZip64, final boolean phased) throws IOException {
- if (!phased && channel != null) {
- rewriteSizesAndCrc(actuallyNeedsZip64);
- }
-
- if (!phased) {
- writeDataDescriptor(entry.entry);
- }
- entry = null;
- }
-
- private void preClose() throws IOException {
- if (finished) {
- throw new IOException("Stream has already been finished");
- }
-
- if (entry == null) {
- throw new IOException("No current entry to close");
- }
-
- if (!entry.hasWritten) {
- write(EMPTY, 0, 0);
- }
- }
-
- /**
- * Adds an archive entry with a raw input stream.
- *
- * If crc, size and compressed size are supplied on the entry, these values will be used as-is.
- * Zip64 status is re-established based on the settings in this stream, and the supplied value
- * is ignored.
- *
- * The entry is put and closed immediately.
- *
- * @param entry The archive entry to add
- * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
- * @throws IOException If copying fails
- */
- public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream)
- throws IOException {
- final ZipArchiveEntry ae = new ZipArchiveEntry(entry);
- if (hasZip64Extra(ae)) {
- // Will be re-added as required. This may make the file generated with this method
- // somewhat smaller than standard mode,
- // since standard mode is unable to remove the zip 64 header.
- ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
- }
- final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN
- && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN
- && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
- putArchiveEntry(ae, is2PhaseSource);
- copyFromZipInputStream(rawStream);
- closeCopiedEntry(is2PhaseSource);
- }
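This pairs with ZipFile#getRawInputStream for copying entries between archives without recompression (a sketch, not part of the diff; the file names are placeholders):

try (ZipFile source = new ZipFile(new File("in.zip"));
     ZipArchiveOutputStream target = new ZipArchiveOutputStream(new File("out.zip"))) {
    ZipArchiveEntry e = source.getEntry("data.bin");
    // the raw stream stays compressed/encrypted; no inflate/deflate round trip
    target.addRawArchiveEntry(e, source.getRawInputStream(e));
}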
-
- /**
- * Ensures all bytes sent to the deflater are written to the stream.
- */
- private void flushDeflater() throws IOException {
- if (entry.entry.getMethod() == DEFLATED) {
- streamCompressor.flushDeflater();
- }
- }
-
- /**
- * Ensures the current entry's size and CRC information is set to
- * the values just written, verifies it isn't too big in the
- * Zip64Mode.Never case and returns whether the entry would
- * require a Zip64 extra field.
- */
- private boolean handleSizesAndCrc(final long bytesWritten, final long crc,
- final Zip64Mode effectiveMode)
- throws ZipException {
- if (entry.entry.getMethod() == DEFLATED) {
- /* It turns out def.getBytesRead() returns wrong values if
- * the size exceeds 4 GB on Java < Java7
- entry.entry.setSize(def.getBytesRead());
- */
- entry.entry.setSize(entry.bytesRead);
- entry.entry.setCompressedSize(bytesWritten);
- entry.entry.setCrc(crc);
-
- } else if (channel == null) {
- if (entry.entry.getCrc() != crc) {
- throw new ZipException("bad CRC checksum for entry "
- + entry.entry.getName() + ": "
- + Long.toHexString(entry.entry.getCrc())
- + " instead of "
- + Long.toHexString(crc));
- }
-
- if (entry.entry.getSize() != bytesWritten) {
- throw new ZipException("bad size for entry "
- + entry.entry.getName() + ": "
- + entry.entry.getSize()
- + " instead of "
- + bytesWritten);
- }
- } else { /* method is STORED and we used SeekableByteChannel */
- entry.entry.setSize(bytesWritten);
- entry.entry.setCompressedSize(bytesWritten);
- entry.entry.setCrc(crc);
- }
-
- return checkIfNeedsZip64(effectiveMode);
- }
-
- /**
- * Verifies the sizes aren't too big in the Zip64Mode.Never case
- * and returns whether the entry would require a Zip64 extra
- * field.
- */
- private boolean checkIfNeedsZip64(final Zip64Mode effectiveMode)
- throws ZipException {
- final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode);
- if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) {
- throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
- }
- return actuallyNeedsZip64;
- }
-
- private boolean isZip64Required(final ZipArchiveEntry entry1, final Zip64Mode requestedMode) {
- return requestedMode == Zip64Mode.Always || isTooLargeForZip32(entry1);
- }
-
- private boolean isTooLargeForZip32(final ZipArchiveEntry zipArchiveEntry) {
- return zipArchiveEntry.getSize() >= ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZIP64_MAGIC;
- }
-
- /**
- * When using random access output, write the local file header
- * and potentially the ZIP64 extra containing the correct CRC and
- * compressed/uncompressed sizes.
- */
- private void rewriteSizesAndCrc(final boolean actuallyNeedsZip64)
- throws IOException {
- final long save = channel.position();
-
- channel.position(entry.localDataStart);
- writeOut(ZipLong.getBytes(entry.entry.getCrc()));
- if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
- writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
- writeOut(ZipLong.getBytes(entry.entry.getSize()));
- } else {
- writeOut(ZipLong.ZIP64_MAGIC.getBytes());
- writeOut(ZipLong.ZIP64_MAGIC.getBytes());
- }
-
- if (hasZip64Extra(entry.entry)) {
- final ByteBuffer name = getName(entry.entry);
- final int nameLen = name.limit() - name.position();
- // seek to ZIP64 extra, skip header and size information
- channel.position(entry.localDataStart + 3 * WORD + 2 * SHORT
- + nameLen + 2 * SHORT);
- // inside the ZIP64 extra uncompressed size comes
- // first, unlike the LFH, CD or data descriptor
- writeOut(ZipEightByteInteger.getBytes(entry.entry.getSize()));
- writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()));
-
- if (!actuallyNeedsZip64) {
- // do some cleanup:
- // * rewrite version needed to extract
- channel.position(entry.localDataStart - 5 * SHORT);
- writeOut(ZipShort.getBytes(versionNeededToExtract(entry.entry.getMethod(), false, false)));
-
- // * remove ZIP64 extra so it doesn't get written
- // to the central directory
- entry.entry.removeExtraField(Zip64ExtendedInformationExtraField
- .HEADER_ID);
- entry.entry.setExtra();
-
- // * reset hasUsedZip64 if it has been set because
- // of this entry
- if (entry.causedUseOfZip64) {
- hasUsedZip64 = false;
- }
- }
- }
- channel.position(save);
- }
-
- /**
- * {@inheritDoc}
- * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
- * @throws Zip64RequiredException if the entry's uncompressed or
- * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
- * is {@link Zip64Mode#Never}.
- */
- @Override
- public void putArchiveEntry(final ArchiveEntry archiveEntry) throws IOException {
- putArchiveEntry(archiveEntry, false);
- }
-
- /**
- * Writes the headers for an archive entry to the output stream.
- * The caller must then write the content to the stream and call
- * {@link #closeArchiveEntry()} to complete the process.
- *
- * @param archiveEntry The archiveEntry
- * @param phased If true size, compressedSize and crc required to be known up-front in the archiveEntry
- * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
- * @throws Zip64RequiredException if the entry's uncompressed or
- * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
- * is {@link Zip64Mode#Never}.
- */
- private void putArchiveEntry(final ArchiveEntry archiveEntry, final boolean phased) throws IOException {
- if (finished) {
- throw new IOException("Stream has already been finished");
- }
-
- if (entry != null) {
- closeArchiveEntry();
- }
-
- entry = new CurrentEntry((ZipArchiveEntry) archiveEntry);
- entries.add(entry.entry);
-
- setDefaults(entry.entry);
-
- final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
- validateSizeInformation(effectiveMode);
-
- if (shouldAddZip64Extra(entry.entry, effectiveMode)) {
-
- final Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry);
-
- ZipEightByteInteger size;
- ZipEightByteInteger compressedSize;
- if (phased) {
- // sizes are already known
- size = new ZipEightByteInteger(entry.entry.getSize());
- compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
- } else if (entry.entry.getMethod() == STORED
- && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
- // actually, we already know the sizes
- compressedSize = size = new ZipEightByteInteger(entry.entry.getSize());
- } else {
- // just a placeholder, real data will be in data
- // descriptor or inserted later via SeekableByteChannel
- compressedSize = size = ZipEightByteInteger.ZERO;
- }
- z64.setSize(size);
- z64.setCompressedSize(compressedSize);
- entry.entry.setExtra();
- }
-
- if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) {
- def.setLevel(level);
- hasCompressionLevelChanged = false;
- }
- writeLocalFileHeader((ZipArchiveEntry) archiveEntry, phased);
- }
-
- /**
- * Provides default values for compression method and last
- * modification time.
- */
- private void setDefaults(final ZipArchiveEntry entry) {
- if (entry.getMethod() == -1) { // not specified
- entry.setMethod(method);
- }
-
- if (entry.getTime() == -1) { // not specified
- entry.setTime(System.currentTimeMillis());
- }
- }
-
- /**
- * Throws an exception if the size is unknown for a stored entry
- * that is written to a non-seekable output or the entry is too
- * big to be written without Zip64 extra but the mode has been set
- * to Never.
- */
- private void validateSizeInformation(final Zip64Mode effectiveMode)
- throws ZipException {
- // Size/CRC not required if SeekableByteChannel is used
- if (entry.entry.getMethod() == STORED && channel == null) {
- if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
- throw new ZipException("uncompressed size is required for"
- + " STORED method when not writing to a"
- + " file");
- }
- if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
- throw new ZipException("crc checksum is required for STORED"
- + " method when not writing to a file");
- }
- entry.entry.setCompressedSize(entry.entry.getSize());
- }
-
- if ((entry.entry.getSize() >= ZIP64_MAGIC
- || entry.entry.getCompressedSize() >= ZIP64_MAGIC)
- && effectiveMode == Zip64Mode.Never) {
- throw new Zip64RequiredException(Zip64RequiredException
- .getEntryTooBigMessage(entry.entry));
- }
- }
-
- /**
- * Whether to add a Zip64 extended information extra field to the
- * local file header.
- *
- * Returns true if
- *
- *   - mode is Always
- *   - or we already know it is going to be needed
- *   - or the size is unknown and we can ensure it won't hurt
- *     other implementations if we add it (i.e. we can erase its
- *     usage
- * @param level the compression level.
- * @throws IllegalArgumentException if an invalid compression
- * level is specified.
- */
- public void setLevel(final int level) {
- if (level < Deflater.DEFAULT_COMPRESSION
- || level > Deflater.BEST_COMPRESSION) {
- throw new IllegalArgumentException("Invalid compression level: "
- + level);
- }
- hasCompressionLevelChanged = (this.level != level);
- this.level = level;
- }
-
- /**
- * Sets the default compression method for subsequent entries.
- *
- *
- * Default is DEFLATED.
- * @param method an int from java.util.zip.ZipEntry
- */
- public void setMethod(final int method) {
- this.method = method;
- }
-
- /**
- * Whether this stream is able to write the given entry.
- *
- *
- * May return false if it is set up to use encryption or a
- * compression method that hasn't been implemented yet.
- * @since 1.1
- */
- @Override
- public boolean canWriteEntryData(final ArchiveEntry ae) {
- if (ae instanceof ZipArchiveEntry) {
- final ZipArchiveEntry zae = (ZipArchiveEntry) ae;
- return zae.getMethod() != ZipMethod.IMPLODING.getCode()
- && zae.getMethod() != ZipMethod.UNSHRINKING.getCode()
- && ZipUtil.canHandleEntryData(zae);
- }
- return false;
- }
-
- /**
- * Writes bytes to ZIP entry.
- * @param b the byte array to write
- * @param offset the start position to write from
- * @param length the number of bytes to write
- * @throws IOException on error
- */
- @Override
- public void write(final byte[] b, final int offset, final int length) throws IOException {
- if (entry == null) {
- throw new IllegalStateException("No current entry");
- }
- ZipUtil.checkRequestedFeatures(entry.entry);
- final long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod());
- count(writtenThisTime);
- }
-
- /**
- * Write bytes to output or random access file.
- * @param data the byte array to write
- * @throws IOException on error
- */
- private void writeCounted(final byte[] data) throws IOException {
- streamCompressor.writeCounted(data);
- }
-
- private void copyFromZipInputStream(final InputStream src) throws IOException {
- if (entry == null) {
- throw new IllegalStateException("No current entry");
- }
- ZipUtil.checkRequestedFeatures(entry.entry);
- entry.hasWritten = true;
- int length;
- while ((length = src.read(copyBuffer)) >= 0) {
- streamCompressor.writeCounted(copyBuffer, 0, length);
- count(length);
- }
- }
-
- /**
- * Closes this output stream and releases any system resources
- * associated with the stream.
- *
- * @throws IOException if an I/O error occurs.
- * @throws Zip64RequiredException if the archive's size exceeds 4
- * GByte or there are more than 65535 entries inside the archive
- * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
- */
- @Override
- public void close() throws IOException {
- if (!finished) {
- finish();
- }
- destroy();
- }
-
- /**
- * Flushes this output stream and forces any buffered output bytes
- * to be written out to the stream.
- *
- * @throws IOException if an I/O error occurs.
- */
- @Override
- public void flush() throws IOException {
- if (out != null) {
- out.flush();
- }
- }
-
- /*
- * Various ZIP constants shared between this class, ZipArchiveInputStream and ZipFile
- */
- /**
- * local file header signature
- */
- static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes(); //NOSONAR
- /**
- * data descriptor signature
- */
- static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes(); //NOSONAR
- /**
- * central file header signature
- */
- static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes(); //NOSONAR
- /**
- * end of central dir signature
- */
- static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L); //NOSONAR
- /**
- * ZIP64 end of central dir signature
- */
- static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L); //NOSONAR
- /**
- * ZIP64 end of central dir locator signature
- */
- static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L); //NOSONAR
-
- /**
- * Writes next block of compressed data to the output stream.
- * @throws IOException on error
- */
- protected final void deflate() throws IOException {
- streamCompressor.deflate();
- }
-
- /**
- * Writes the local file header entry
- * @param ze the entry to write
- * @throws IOException on error
- */
- protected void writeLocalFileHeader(final ZipArchiveEntry ze) throws IOException {
- writeLocalFileHeader(ze, false);
- }
-
- private void writeLocalFileHeader(final ZipArchiveEntry ze, final boolean phased) throws IOException {
- final boolean encodable = zipEncoding.canEncode(ze.getName());
- final ByteBuffer name = getName(ze);
-
- if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) {
- addUnicodeExtraFields(ze, encodable, name);
- }
-
- final long localHeaderStart = streamCompressor.getTotalBytesWritten();
- final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased, localHeaderStart);
- metaData.put(ze, new EntryMetaData(localHeaderStart, usesDataDescriptor(ze.getMethod(), phased)));
- entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset
- writeCounted(localHeader);
- entry.dataStart = streamCompressor.getTotalBytesWritten();
- }
-
-
- private byte[] createLocalFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final boolean encodable,
- final boolean phased, long archiveOffset) {
- ResourceAlignmentExtraField oldAlignmentEx =
- (ResourceAlignmentExtraField) ze.getExtraField(ResourceAlignmentExtraField.ID);
- if (oldAlignmentEx != null) {
- ze.removeExtraField(ResourceAlignmentExtraField.ID);
- }
-
- int alignment = ze.getAlignment();
- if (alignment <= 0 && oldAlignmentEx != null) {
- alignment = oldAlignmentEx.getAlignment();
- }
-
- if (alignment > 1 || (oldAlignmentEx != null && !oldAlignmentEx.allowMethodChange())) {
- int oldLength = LFH_FILENAME_OFFSET +
- name.limit() - name.position() +
- ze.getLocalFileDataExtra().length;
-
- int padding = (int) ((-archiveOffset - oldLength - ZipExtraField.EXTRAFIELD_HEADER_SIZE
- - ResourceAlignmentExtraField.BASE_SIZE) &
- (alignment - 1));
- ze.addExtraField(new ResourceAlignmentExtraField(alignment,
- oldAlignmentEx != null && oldAlignmentEx.allowMethodChange(), padding));
- }
-
- final byte[] extra = ze.getLocalFileDataExtra();
- final int nameLen = name.limit() - name.position();
- final int len = LFH_FILENAME_OFFSET + nameLen + extra.length;
- final byte[] buf = new byte[len];
-
- System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, WORD);
-
- //store method in local variable to prevent multiple method calls
- final int zipMethod = ze.getMethod();
- final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased);
-
- putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET);
-
- final GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(!encodable && fallbackToUTF8, dataDescriptor);
- generalPurposeBit.encode(buf, LFH_GPB_OFFSET);
-
- // compression method
- putShort(zipMethod, buf, LFH_METHOD_OFFSET);
-
- ZipUtil.toDosTime(calendarInstance, ze.getTime(), buf, LFH_TIME_OFFSET);
-
- // CRC
- if (phased){
- putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
- } else if (zipMethod == DEFLATED || channel != null) {
- System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, WORD);
- } else {
- putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
- }
-
- // compressed length
- // uncompressed length
- if (hasZip64Extra(entry.entry)){
- // point to ZIP64 extended information extra field for
- // sizes, may get rewritten once sizes are known if
- // stream is seekable
- ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET);
- ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
- } else if (phased) {
- putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
- putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
- } else if (zipMethod == DEFLATED || channel != null) {
- System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, WORD);
- System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, WORD);
- } else { // Stored
- putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
- putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
- }
- // file name length
- putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET);
-
- // extra field length
- putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET);
-
- // file name
- System.arraycopy(name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen);
-
- // extra fields
- System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length);
-
- return buf;
- }
-
-
- /**
- * Adds Unicode extra fields for name and file comment if mode is
- * ALWAYS or the data cannot be encoded using the configured
- * encoding.
- */
- private void addUnicodeExtraFields(final ZipArchiveEntry ze, final boolean encodable,
- final ByteBuffer name)
- throws IOException {
- if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
- || !encodable) {
- ze.addExtraField(new UnicodePathExtraField(ze.getName(),
- name.array(),
- name.arrayOffset(),
- name.limit()
- - name.position()));
- }
-
- final String comm = ze.getComment();
- if (comm != null && !"".equals(comm)) {
-
- final boolean commentEncodable = zipEncoding.canEncode(comm);
-
- if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
- || !commentEncodable) {
- final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
- ze.addExtraField(new UnicodeCommentExtraField(comm,
- commentB.array(),
- commentB.arrayOffset(),
- commentB.limit()
- - commentB.position())
- );
- }
- }
- }
-
- /**
- * Writes the data descriptor entry.
- * @param ze the entry to write
- * @throws IOException on error
- */
- protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
- if (!usesDataDescriptor(ze.getMethod(), false)) {
- return;
- }
- writeCounted(DD_SIG);
- writeCounted(ZipLong.getBytes(ze.getCrc()));
- if (!hasZip64Extra(ze)) {
- writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
- writeCounted(ZipLong.getBytes(ze.getSize()));
- } else {
- writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
- writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
- }
- }
-
- /**
- * Writes the central file header entry.
- * @param ze the entry to write
- * @throws IOException on error
- * @throws Zip64RequiredException if the archive's size exceeds 4
- * GByte and {@link #setUseZip64} is {@link
- * Zip64Mode#Never}.
- */
- protected void writeCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
- final byte[] centralFileHeader = createCentralFileHeader(ze);
- writeCounted(centralFileHeader);
- }
-
- private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
-
- final EntryMetaData entryMetaData = metaData.get(ze);
- final boolean needsZip64Extra = hasZip64Extra(ze)
- || ze.getCompressedSize() >= ZIP64_MAGIC
- || ze.getSize() >= ZIP64_MAGIC
- || entryMetaData.offset >= ZIP64_MAGIC
- || zip64Mode == Zip64Mode.Always;
-
- if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
- // must be the offset that is too big, otherwise an
- // exception would have been throw in putArchiveEntry or
- // closeArchiveEntry
- throw new Zip64RequiredException(Zip64RequiredException
- .ARCHIVE_TOO_BIG_MESSAGE);
- }
-
-
- handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra);
-
- return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra);
- }
-
- /**
- * Creates the central file header entry.
- * @param ze the entry to write
- * @param name The encoded name
- * @param entryMetaData meta data for this file
- * @throws IOException on error
- */
- private byte[] createCentralFileHeader(final ZipArchiveEntry ze, final ByteBuffer name,
- final EntryMetaData entryMetaData,
- final boolean needsZip64Extra) throws IOException {
- final byte[] extra = ze.getCentralDirectoryExtra();
-
- // file comment length
- String comm = ze.getComment();
- if (comm == null) {
- comm = "";
- }
-
- final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
- final int nameLen = name.limit() - name.position();
- final int commentLen = commentB.limit() - commentB.position();
- final int len= CFH_FILENAME_OFFSET + nameLen + extra.length + commentLen;
- final byte[] buf = new byte[len];
-
- System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, WORD);
-
- // version made by
- // CheckStyle:MagicNumber OFF
- putShort((ze.getPlatform() << 8) | (!hasUsedZip64 ? DATA_DESCRIPTOR_MIN_VERSION : ZIP64_MIN_VERSION),
- buf, CFH_VERSION_MADE_BY_OFFSET);
-
- final int zipMethod = ze.getMethod();
- final boolean encodable = zipEncoding.canEncode(ze.getName());
- putShort(versionNeededToExtract(zipMethod, needsZip64Extra, entryMetaData.usesDataDescriptor),
- buf, CFH_VERSION_NEEDED_OFFSET);
- getGeneralPurposeBits(!encodable && fallbackToUTF8, entryMetaData.usesDataDescriptor).encode(buf, CFH_GPB_OFFSET);
-
- // compression method
- putShort(zipMethod, buf, CFH_METHOD_OFFSET);
-
-
- // last mod. time and date
- ZipUtil.toDosTime(calendarInstance, ze.getTime(), buf, CFH_TIME_OFFSET);
-
- // CRC
- // compressed length
- // uncompressed length
- putLong(ze.getCrc(), buf, CFH_CRC_OFFSET);
- if (ze.getCompressedSize() >= ZIP64_MAGIC
- || ze.getSize() >= ZIP64_MAGIC
- || zip64Mode == Zip64Mode.Always) {
- ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET);
- ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET);
- } else {
- putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET);
- putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);
- }
-
- putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET);
-
- // extra field length
- putShort(extra.length, buf, CFH_EXTRA_LENGTH_OFFSET);
-
- putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET);
-
- // disk number start
- System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, SHORT);
-
- // internal file attributes
- putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET);
-
- // external file attributes
- putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET);
-
- // relative offset of LFH
- if (entryMetaData.offset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) {
- putLong(ZIP64_MAGIC, buf, CFH_LFH_OFFSET);
- } else {
- putLong(Math.min(entryMetaData.offset, ZIP64_MAGIC), buf, CFH_LFH_OFFSET);
- }
-
- // file name
- System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen);
-
- final int extraStart = CFH_FILENAME_OFFSET + nameLen;
- System.arraycopy(extra, 0, buf, extraStart, extra.length);
-
- final int commentStart = extraStart + extra.length;
-
- // file comment
- System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen);
- return buf;
- }
-
- /**
- * If the entry needs Zip64 extra information inside the central
- * directory then configure its data.
- */
- private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset,
- final boolean needsZip64Extra) {
- if (needsZip64Extra) {
- final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
- if (ze.getCompressedSize() >= ZIP64_MAGIC
- || ze.getSize() >= ZIP64_MAGIC
- || zip64Mode == Zip64Mode.Always) {
- z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
- z64.setSize(new ZipEightByteInteger(ze.getSize()));
- } else {
- // reset value that may have been set for LFH
- z64.setCompressedSize(null);
- z64.setSize(null);
- }
- if (lfhOffset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) {
- z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
- }
- ze.setExtra();
- }
- }
-
- /**
- * Writes the "End of central dir record".
- * @throws IOException on error
- * @throws Zip64RequiredException if the archive's size exceeds 4
- * GByte or there are more than 65535 entries inside the archive
- * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
- */
- protected void writeCentralDirectoryEnd() throws IOException {
- writeCounted(EOCD_SIG);
-
- // disk numbers
- writeCounted(ZERO);
- writeCounted(ZERO);
-
- // number of entries
- final int numberOfEntries = entries.size();
- if (numberOfEntries > ZIP64_MAGIC_SHORT
- && zip64Mode == Zip64Mode.Never) {
- throw new Zip64RequiredException(Zip64RequiredException
- .TOO_MANY_ENTRIES_MESSAGE);
- }
- if (cdOffset > ZIP64_MAGIC && zip64Mode == Zip64Mode.Never) {
- throw new Zip64RequiredException(Zip64RequiredException
- .ARCHIVE_TOO_BIG_MESSAGE);
- }
-
- final byte[] num = ZipShort.getBytes(Math.min(numberOfEntries,
- ZIP64_MAGIC_SHORT));
- writeCounted(num);
- writeCounted(num);
-
- // length and location of CD
- writeCounted(ZipLong.getBytes(Math.min(cdLength, ZIP64_MAGIC)));
- writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZIP64_MAGIC)));
-
- // ZIP file comment
- final ByteBuffer data = this.zipEncoding.encode(comment);
- final int dataLen = data.limit() - data.position();
- writeCounted(ZipShort.getBytes(dataLen));
- streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen);
- }
-
- /**
- * Writes the "ZIP64 End of central dir record" and
- * "ZIP64 End of central dir locator".
- * @throws IOException on error
- * @since 1.3
- */
- protected void writeZip64CentralDirectory() throws IOException {
- if (zip64Mode == Zip64Mode.Never) {
- return;
- }
-
- if (!hasUsedZip64
- && (cdOffset >= ZIP64_MAGIC || cdLength >= ZIP64_MAGIC
- || entries.size() >= ZIP64_MAGIC_SHORT)) {
- // actually "will use"
- hasUsedZip64 = true;
- }
-
- if (!hasUsedZip64) {
- return;
- }
-
- final long offset = streamCompressor.getTotalBytesWritten();
-
- writeOut(ZIP64_EOCD_SIG);
- // size, we don't have any variable length as we don't support
- // the extensible data sector, yet
- writeOut(ZipEightByteInteger
- .getBytes(SHORT /* version made by */
- + SHORT /* version needed to extract */
- + WORD /* disk number */
- + WORD /* disk with central directory */
- + DWORD /* number of entries in CD on this disk */
- + DWORD /* total number of entries */
- + DWORD /* size of CD */
- + (long) DWORD /* offset of CD */
- ));
-
- // version made by and version needed to extract
- writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));
- writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));
-
- // disk numbers - four bytes this time
- writeOut(LZERO);
- writeOut(LZERO);
-
- // number of entries
- final byte[] num = ZipEightByteInteger.getBytes(entries.size());
- writeOut(num);
- writeOut(num);
-
- // length and location of CD
- writeOut(ZipEightByteInteger.getBytes(cdLength));
- writeOut(ZipEightByteInteger.getBytes(cdOffset));
-
- // no "zip64 extensible data sector" for now
-
- // and now the "ZIP64 end of central directory locator"
- writeOut(ZIP64_EOCD_LOC_SIG);
-
- // disk number holding the ZIP64 EOCD record
- writeOut(LZERO);
- // relative offset of ZIP64 EOCD record
- writeOut(ZipEightByteInteger.getBytes(offset));
- // total number of disks
- writeOut(ONE);
- }
-
- /**
- * Write bytes to output or random access file.
- * @param data the byte array to write
- * @throws IOException on error
- */
- protected final void writeOut(final byte[] data) throws IOException {
- streamCompressor.writeOut(data, 0, data.length);
- }
-
-
- /**
- * Write bytes to output or random access file.
- * @param data the byte array to write
- * @param offset the start position to write from
- * @param length the number of bytes to write
- * @throws IOException on error
- */
- protected final void writeOut(final byte[] data, final int offset, final int length)
- throws IOException {
- streamCompressor.writeOut(data, offset, length);
- }
-
-
- private GeneralPurposeBit getGeneralPurposeBits(final boolean utfFallback, boolean usesDataDescriptor) {
- final GeneralPurposeBit b = new GeneralPurposeBit();
- b.useUTF8ForNames(useUTF8Flag || utfFallback);
- if (usesDataDescriptor) {
- b.useDataDescriptor(true);
- }
- return b;
- }
-
- private int versionNeededToExtract(final int zipMethod, final boolean zip64, final boolean usedDataDescriptor) {
- if (zip64) {
- return ZIP64_MIN_VERSION;
- }
- if (usedDataDescriptor) {
- return DATA_DESCRIPTOR_MIN_VERSION;
- }
- return versionNeededToExtractMethod(zipMethod);
- }
-
- private boolean usesDataDescriptor(final int zipMethod, boolean phased) {
- return !phased && zipMethod == DEFLATED && channel == null;
- }
-
- private int versionNeededToExtractMethod(int zipMethod) {
- return zipMethod == DEFLATED ? DEFLATE_MIN_VERSION : INITIAL_VERSION;
- }
-
- /**
- * Creates a new zip entry taking some information from the given
- * file and using the provided name.
- *
- *
- * <p>The name will be adjusted to end with a forward slash "/" if
- * the file is a directory. If the file is not a directory a
- * potential trailing forward slash will be stripped from the
- * entry name.
- *
- *
- * <p>Must not be used if the stream has already been closed.
- */
- @Override
- public ArchiveEntry createArchiveEntry(final File inputFile, final String entryName)
- throws IOException {
- if (finished) {
- throw new IOException("Stream has already been finished");
- }
- return new ZipArchiveEntry(inputFile, entryName);
- }
-
- /**
- * Get the existing ZIP64 extended information extra field or
- * create a new one and add it to the entry.
- *
- * @since 1.3
- */
- private Zip64ExtendedInformationExtraField
- getZip64Extra(final ZipArchiveEntry ze) {
- if (entry != null) {
- entry.causedUseOfZip64 = !hasUsedZip64;
- }
- hasUsedZip64 = true;
- Zip64ExtendedInformationExtraField z64 =
- (Zip64ExtendedInformationExtraField)
- ze.getExtraField(Zip64ExtendedInformationExtraField
- .HEADER_ID);
- if (z64 == null) {
- /*
- System.err.println("Adding z64 for " + ze.getName()
- + ", method: " + ze.getMethod()
- + " (" + (ze.getMethod() == STORED) + ")"
- + ", channel: " + (channel != null));
- */
- z64 = new Zip64ExtendedInformationExtraField();
- }
-
- // even if the field is there already, make sure it is the first one
- ze.addAsFirstExtraField(z64);
-
- return z64;
- }
-
- /**
- * Is there a ZIP64 extended information extra field for the
- * entry?
- *
- * @since 1.3
- */
- private boolean hasZip64Extra(final ZipArchiveEntry ze) {
- return ze.getExtraField(Zip64ExtendedInformationExtraField
- .HEADER_ID)
- != null;
- }
-
- /**
- * If the mode is AsNeeded and the entry is a compressed entry of
- * unknown size that gets written to a non-seekable stream then
- * change the default to Never.
- *
- * @since 1.3
- */
- private Zip64Mode getEffectiveZip64Mode(final ZipArchiveEntry ze) {
- if (zip64Mode != Zip64Mode.AsNeeded
- || channel != null
- || ze.getMethod() != DEFLATED
- || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
- return zip64Mode;
- }
- return Zip64Mode.Never;
- }
-
- private ZipEncoding getEntryEncoding(final ZipArchiveEntry ze) {
- final boolean encodable = zipEncoding.canEncode(ze.getName());
- return !encodable && fallbackToUTF8
- ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
- }
-
- private ByteBuffer getName(final ZipArchiveEntry ze) throws IOException {
- return getEntryEncoding(ze).encode(ze.getName());
- }
-
- /**
- * Closes the underlying stream/file without finishing the
- * archive; the result will likely be a corrupt archive.
- *
- *
- * <p>This method only exists to support tests that generate
- * corrupt archives so they can clean up any temporary files.
- */
- void destroy() throws IOException {
- if (channel != null) {
- channel.close();
- }
- if (out != null) {
- out.close();
- }
- }
-
- /**
- * Represents the possible policies for creating Unicode
- * extra fields.
- */
- public static final class UnicodeExtraFieldPolicy {
- /**
- * Always create Unicode extra fields.
- */
- public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");
- /**
- * Never create Unicode extra fields.
- */
- public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");
- /**
- * Create Unicode extra fields for filenames that cannot be
- * encoded using the specified encoding.
- */
- public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE =
- new UnicodeExtraFieldPolicy("not encodeable");
-
- private final String name;
- private UnicodeExtraFieldPolicy(final String n) {
- name = n;
- }
- @Override
- public String toString() {
- return name;
- }
- }
-
- /**
- * Structure collecting information for the entry that is
- * currently being written.
- */
- private static final class CurrentEntry {
- private CurrentEntry(final ZipArchiveEntry entry) {
- this.entry = entry;
- }
- /**
- * Current ZIP entry.
- */
- private final ZipArchiveEntry entry;
- /**
- * Offset of the CRC field inside the local file header of the
- * current entry.
- */
- private long localDataStart = 0;
- /**
- * Offset where the current entry's data starts.
- */
- private long dataStart = 0;
- /**
- * Number of bytes read for the current entry (can't rely on
- * Deflater#getBytesRead) when using DEFLATED.
- */
- private long bytesRead = 0;
- /**
- * Whether current entry was the first one using ZIP64 features.
- */
- private boolean causedUseOfZip64 = false;
- /**
- * Whether write() has been called at all.
- *
- *
- * <p>In order to create a valid archive {@link
- * #closeArchiveEntry closeArchiveEntry} will write an empty
- * array to get the CRC right if nothing has been written to
- * the stream at all.
- */
- private boolean hasWritten;
- }
-
- private static final class EntryMetaData {
- private final long offset;
- private final boolean usesDataDescriptor;
- private EntryMetaData(long offset, boolean usesDataDescriptor) {
- this.offset = offset;
- this.usesDataDescriptor = usesDataDescriptor;
- }
- }
-}
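A minimal sketch of how a caller exercises the ZIP64 handling implemented above, using the public Commons Compress API. The output file name demo.zip and the entry contents are illustrative only. Constructing the stream from a File gives it a seekable channel, so the CRC and size fields can be patched into the local file header afterwards instead of emitting a data descriptor (the channel != null branches above).

    import java.io.File;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.commons.compress.archivers.zip.Zip64Mode;
    import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
    import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;

    public class Zip64Demo {
        public static void main(String[] args) throws IOException {
            try (ZipArchiveOutputStream out = new ZipArchiveOutputStream(new File("demo.zip"))) {
                // AsNeeded only emits ZIP64 structures once a size, offset or
                // entry count crosses the 0xFFFFFFFF / 0xFFFF sentinel values.
                out.setUseZip64(Zip64Mode.AsNeeded);
                ZipArchiveEntry entry = new ZipArchiveEntry("hello.txt");
                out.putArchiveEntry(entry);
                out.write("hello".getBytes(StandardCharsets.UTF_8));
                out.closeArchiveEntry();
            } // close() finishes the archive and writes the central directory
        }
    }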
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipConstants.java b/src/org/apache/commons/compress/archivers/zip/ZipConstants.java
deleted file mode 100644
index c230991a346..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipConstants.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-/**
- * Various constants used throughout the package.
- *
- * @since 1.3
- */
-final class ZipConstants {
- /** Masks last eight bits */
- static final int BYTE_MASK = 0xFF;
-
- /** length of a ZipShort in bytes */
- static final int SHORT = 2;
-
- /** length of a ZipLong in bytes */
- static final int WORD = 4;
-
- /** length of a ZipEightByteInteger in bytes */
- static final int DWORD = 8;
-
- /** Initial ZIP specification version */
- static final int INITIAL_VERSION = 10;
-
- /**
- * ZIP specification version that introduced DEFLATE compression method.
- * @since 1.15
- */
- static final int DEFLATE_MIN_VERSION = 20;
-
- /** ZIP specification version that introduced data descriptor method */
- static final int DATA_DESCRIPTOR_MIN_VERSION = 20;
-
- /** ZIP specification version that introduced ZIP64 */
- static final int ZIP64_MIN_VERSION = 45;
-
- /**
- * Value stored in two-byte size and similar fields if ZIP64
- * extensions are used.
- */
- static final int ZIP64_MAGIC_SHORT = 0xFFFF;
-
- /**
- * Value stored in four-byte size and similar fields if ZIP64
- * extensions are used.
- */
- static final long ZIP64_MAGIC = 0xFFFFFFFFL;
-
- private ZipConstants() { }
-
-}
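The two magic values above drive a simple sentinel convention: a two- or four-byte field holding all ones means the real value lives in the ZIP64 extra field. A small self-contained sketch of that check, with local copies of the constants since ZipConstants itself is package-private:

    public class Zip64Sentinels {
        static final long ZIP64_MAGIC = 0xFFFFFFFFL; // four-byte sentinel
        static final int ZIP64_MAGIC_SHORT = 0xFFFF; // two-byte sentinel

        /** True if a four-byte size/offset field defers to the ZIP64 extra field. */
        static boolean needsZip64Lookup(long fourByteField) {
            return fourByteField == ZIP64_MAGIC;
        }

        public static void main(String[] args) {
            System.out.println(needsZip64Lookup(1024L));        // false - real size
            System.out.println(needsZip64Lookup(0xFFFFFFFFL));  // true - sentinel
        }
    }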
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipEightByteInteger.java b/src/org/apache/commons/compress/archivers/zip/ZipEightByteInteger.java
deleted file mode 100644
index 9d9e2ec82bd..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipEightByteInteger.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.Serializable;
-import java.math.BigInteger;
-
-import static org.apache.commons.compress.archivers.zip.ZipConstants.BYTE_MASK;
-
-/**
- * Utility class that represents an eight byte integer with conversion
- * rules for the little endian byte order of ZIP files.
- * @Immutable
- *
- * @since 1.2
- */
-public final class ZipEightByteInteger implements Serializable {
- private static final long serialVersionUID = 1L;
-
- private static final int BYTE_1 = 1;
- private static final int BYTE_1_MASK = 0xFF00;
- private static final int BYTE_1_SHIFT = 8;
-
- private static final int BYTE_2 = 2;
- private static final int BYTE_2_MASK = 0xFF0000;
- private static final int BYTE_2_SHIFT = 16;
-
- private static final int BYTE_3 = 3;
- private static final long BYTE_3_MASK = 0xFF000000L;
- private static final int BYTE_3_SHIFT = 24;
-
- private static final int BYTE_4 = 4;
- private static final long BYTE_4_MASK = 0xFF00000000L;
- private static final int BYTE_4_SHIFT = 32;
-
- private static final int BYTE_5 = 5;
- private static final long BYTE_5_MASK = 0xFF0000000000L;
- private static final int BYTE_5_SHIFT = 40;
-
- private static final int BYTE_6 = 6;
- private static final long BYTE_6_MASK = 0xFF000000000000L;
- private static final int BYTE_6_SHIFT = 48;
-
- private static final int BYTE_7 = 7;
- private static final long BYTE_7_MASK = 0x7F00000000000000L;
- private static final int BYTE_7_SHIFT = 56;
-
- private static final int LEFTMOST_BIT_SHIFT = 63;
- private static final byte LEFTMOST_BIT = (byte) 0x80;
-
- private final BigInteger value;
-
- public static final ZipEightByteInteger ZERO = new ZipEightByteInteger(0);
-
- /**
- * Create instance from a number.
- * @param value the long to store as a ZipEightByteInteger
- */
- public ZipEightByteInteger(final long value) {
- this(BigInteger.valueOf(value));
- }
-
- /**
- * Create instance from a number.
- * @param value the BigInteger to store as a ZipEightByteInteger
- */
- public ZipEightByteInteger(final BigInteger value) {
- this.value = value;
- }
-
- /**
- * Create instance from bytes.
- * @param bytes the bytes to store as a ZipEightByteInteger
- */
- public ZipEightByteInteger (final byte[] bytes) {
- this(bytes, 0);
- }
-
- /**
- * Create instance from the eight bytes starting at offset.
- * @param bytes the bytes to store as a ZipEightByteInteger
- * @param offset the offset to start
- */
- public ZipEightByteInteger (final byte[] bytes, final int offset) {
- value = ZipEightByteInteger.getValue(bytes, offset);
- }
-
- /**
- * Get value as eight bytes in little endian byte order.
- * @return value as eight bytes in little endian order
- */
- public byte[] getBytes() {
- return ZipEightByteInteger.getBytes(value);
- }
-
- /**
- * Get value as Java long.
- * @return value as a long
- */
- public long getLongValue() {
- return value.longValue();
- }
-
- /**
- * Get value as Java BigInteger.
- * @return value as a BigInteger
- */
- public BigInteger getValue() {
- return value;
- }
-
- /**
- * Get value as eight bytes in little endian byte order.
- * @param value the value to convert
- * @return value as eight bytes in little endian byte order
- */
- public static byte[] getBytes(final long value) {
- return getBytes(BigInteger.valueOf(value));
- }
-
- /**
- * Get value as eight bytes in little endian byte order.
- * @param value the value to convert
- * @return value as eight bytes in little endian byte order
- */
- public static byte[] getBytes(final BigInteger value) {
- final byte[] result = new byte[8];
- final long val = value.longValue();
- result[0] = (byte) ((val & BYTE_MASK));
- result[BYTE_1] = (byte) ((val & BYTE_1_MASK) >> BYTE_1_SHIFT);
- result[BYTE_2] = (byte) ((val & BYTE_2_MASK) >> BYTE_2_SHIFT);
- result[BYTE_3] = (byte) ((val & BYTE_3_MASK) >> BYTE_3_SHIFT);
- result[BYTE_4] = (byte) ((val & BYTE_4_MASK) >> BYTE_4_SHIFT);
- result[BYTE_5] = (byte) ((val & BYTE_5_MASK) >> BYTE_5_SHIFT);
- result[BYTE_6] = (byte) ((val & BYTE_6_MASK) >> BYTE_6_SHIFT);
- result[BYTE_7] = (byte) ((val & BYTE_7_MASK) >> BYTE_7_SHIFT);
- if (value.testBit(LEFTMOST_BIT_SHIFT)) {
- result[BYTE_7] |= LEFTMOST_BIT;
- }
- return result;
- }
-
- /**
- * Helper method to get the value as a Java long from eight bytes
- * starting at given array offset
- * @param bytes the array of bytes
- * @param offset the offset to start
- * @return the corresponding Java long value
- */
- public static long getLongValue(final byte[] bytes, final int offset) {
- return getValue(bytes, offset).longValue();
- }
-
- /**
- * Helper method to get the value as a Java BigInteger from eight
- * bytes starting at given array offset
- * @param bytes the array of bytes
- * @param offset the offset to start
- * @return the corresponding Java BigInteger value
- */
- public static BigInteger getValue(final byte[] bytes, final int offset) {
- long value = ((long) bytes[offset + BYTE_7] << BYTE_7_SHIFT) & BYTE_7_MASK;
- value += ((long) bytes[offset + BYTE_6] << BYTE_6_SHIFT) & BYTE_6_MASK;
- value += ((long) bytes[offset + BYTE_5] << BYTE_5_SHIFT) & BYTE_5_MASK;
- value += ((long) bytes[offset + BYTE_4] << BYTE_4_SHIFT) & BYTE_4_MASK;
- value += ((long) bytes[offset + BYTE_3] << BYTE_3_SHIFT) & BYTE_3_MASK;
- value += ((long) bytes[offset + BYTE_2] << BYTE_2_SHIFT) & BYTE_2_MASK;
- value += ((long) bytes[offset + BYTE_1] << BYTE_1_SHIFT) & BYTE_1_MASK;
- value += ((long) bytes[offset] & BYTE_MASK);
- final BigInteger val = BigInteger.valueOf(value);
- return (bytes[offset + BYTE_7] & LEFTMOST_BIT) == LEFTMOST_BIT
- ? val.setBit(LEFTMOST_BIT_SHIFT) : val;
- }
-
- /**
- * Helper method to get the value as a Java long from an eight-byte array
- * @param bytes the array of bytes
- * @return the corresponding Java long value
- */
- public static long getLongValue(final byte[] bytes) {
- return getLongValue(bytes, 0);
- }
-
- /**
- * Helper method to get the value as a Java BigInteger from an eight-byte array
- * @param bytes the array of bytes
- * @return the corresponding Java BigInteger value
- */
- public static BigInteger getValue(final byte[] bytes) {
- return getValue(bytes, 0);
- }
-
- /**
- * Override to make two instances with same value equal.
- * @param o an object to compare
- * @return true if the objects are equal
- */
- @Override
- public boolean equals(final Object o) {
- if (o == null || !(o instanceof ZipEightByteInteger)) {
- return false;
- }
- return value.equals(((ZipEightByteInteger) o).getValue());
- }
-
- /**
- * Override to make two instances with same value equal.
- * @return the hashCode of the value stored in the ZipEightByteInteger
- */
- @Override
- public int hashCode() {
- return value.hashCode();
- }
-
- @Override
- public String toString() {
- return "ZipEightByteInteger value: " + value;
- }
-}
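A short round-trip sketch of the little-endian conversion the class performs; the input value is arbitrary:

    import java.math.BigInteger;

    import org.apache.commons.compress.archivers.zip.ZipEightByteInteger;

    public class LittleEndianDemo {
        public static void main(String[] args) {
            byte[] bytes = ZipEightByteInteger.getBytes(0x0102030405060708L);
            // ZIP fields are little endian: least significant byte first.
            System.out.printf("first=%02x last=%02x%n", bytes[0], bytes[7]); // first=08 last=01
            BigInteger back = ZipEightByteInteger.getValue(bytes);
            System.out.println(back.toString(16)); // 102030405060708
        }
    }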
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipEncoding.java b/src/org/apache/commons/compress/archivers/zip/ZipEncoding.java
deleted file mode 100644
index dacd063699e..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipEncoding.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-/**
- * An interface for encoders that do a pretty encoding of ZIP
- * filenames.
- *
- *
- * <p>There are mostly two implementations, one that uses java.nio
- * {@link java.nio.charset.Charset Charset} and one implementation,
- * which copes with simple 8 bit charsets, because java-1.4 did not
- * support Cp437 in java.nio.
- *
- *
- * <p>The main reason for defining an own encoding layer comes from
- * the problems with {@link java.lang.String#getBytes(String)
- * String.getBytes}, which encodes unknown characters as ASCII
- * question marks ('?'). Question marks are per definition an
- * invalid filename on some operating systems like Windows, which
- * leads to ignored ZIP entries.
- *
- *
- * <p>All implementations should implement this interface in a
- * reentrant way.
- */
-public interface ZipEncoding {
- /**
- * Check, whether the given string may be losslessly encoded using this
- * encoding.
- *
- * @param name A filename or ZIP comment.
- * @return Whether the given name may be encoded without any losses.
- */
- boolean canEncode(String name);
-
- /**
- * Encode a filename or a comment to a byte array suitable for
- * storing it to a serialized zip entry.
- *
- *
- * <p>Examples for CP 437 (in pseudo-notation, right hand side is
- * C-style notation):
- *
- * @param name A filename or ZIP comment.
- * @return A byte buffer with a backing array containing the
- * encoded name. Unmappable characters or malformed
- * character sequences are mapped to a sequence of utf-16
- * words encoded in the format %Uxxxx. It is
- * assumed that the byte buffer is positioned at the
- * beginning of the encoded result, the byte buffer has a
- * backing array and the limit of the byte buffer points
- * to the end of the encoded result.
- * @throws IOException on error
- */
- ByteBuffer encode(String name) throws IOException;
-
- /**
- * @param data The byte values to decode.
- * @return The decoded string.
- * @throws IOException on error
- */
- String decode(byte [] data) throws IOException;
-}
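A usage sketch of the contract documented above, obtaining an implementation through ZipEncodingHelper; the CP437 charset name and the file name are examples only:

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.commons.compress.archivers.zip.ZipEncoding;
    import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;

    public class EncodingDemo {
        public static void main(String[] args) throws IOException {
            ZipEncoding cp437 = ZipEncodingHelper.getZipEncoding("CP437");
            String name = "übergröße.txt";
            // canEncode lets a writer decide whether a Unicode extra field is
            // needed before committing the name bytes to the archive.
            System.out.println(cp437.canEncode(name));
            ByteBuffer buf = cp437.encode(name);
            // Per the contract above: positioned at the start, backed by an
            // array, limit marking the end of the encoded name.
            System.out.println((buf.limit() - buf.position()) + " bytes");
        }
    }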
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipEncodingHelper.java b/src/org/apache/commons/compress/archivers/zip/ZipEncodingHelper.java
deleted file mode 100644
index 8aeb789e207..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipEncodingHelper.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.archivers.zip;
-
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.nio.charset.UnsupportedCharsetException;
-
-/**
- * Static helper functions for robustly encoding filenames in zip files.
- */
-public abstract class ZipEncodingHelper {
-
-
- /**
- * name of the encoding UTF-8
- */
- static final String UTF8 = "UTF8";
-
- /**
- * the encoding UTF-8
- */
- static final ZipEncoding UTF8_ZIP_ENCODING = getZipEncoding(UTF8);
-
- /**
- * Instantiates a zip encoding. An NIO based character set encoder/decoder will be returned.
- * As a special case, if the character set is UTF-8, the nio encoder will be configured to replace malformed and
- * unmappable characters with '?'. This matches existing behavior from the older fallback encoder.
- *
- * If the requested character set cannot be found, the platform default will
- * be used instead.
- *
- * @param name The name of the zip encoding. Specify {@code null} for
- * the platform's default encoding.
- * @return A zip encoding for the given encoding name.
- */
- public static ZipEncoding getZipEncoding(final String name) {
- Charset cs = Charset.defaultCharset();
- if (name != null) {
- try {
- cs = Charset.forName(name);
- } catch (UnsupportedCharsetException e) { // NOSONAR we use the default encoding instead
- }
- }
- boolean useReplacement = isUTF8(cs.name());
- return new NioZipEncoding(cs, useReplacement);
- }
-
- /**
- * Returns whether a given encoding is UTF-8. If the given name is null, then check the platform's default encoding.
- *
- * @param charsetName the name of the encoding to check; if null, the platform's default encoding is checked.
- * @return true if the given encoding is UTF-8
- */
- static boolean isUTF8(String charsetName) {
- if (charsetName == null) {
- // check platform's default encoding
- charsetName = Charset.defaultCharset().name();
- }
- if (StandardCharsets.UTF_8.name().equalsIgnoreCase(charsetName)) {
- return true;
- }
- for (final String alias : StandardCharsets.UTF_8.aliases()) {
- if (alias.equalsIgnoreCase(charsetName)) {
- return true;
- }
- }
- return false;
- }
-
- static ByteBuffer growBufferBy(ByteBuffer buffer, int increment) {
- buffer.limit(buffer.position());
- buffer.rewind();
-
- final ByteBuffer on = ByteBuffer.allocate(buffer.capacity() + increment);
-
- on.put(buffer);
- return on;
- }
-}
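A sketch of the fallback behaviour of getZipEncoding. The charset name x-no-such-charset is deliberately made up; it is syntactically legal, so the lookup fails with UnsupportedCharsetException and the platform default is used instead of an error being raised:

    import org.apache.commons.compress.archivers.zip.ZipEncoding;
    import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;

    public class FallbackDemo {
        public static void main(String[] args) {
            // Unknown but legal name: silently falls back to the platform default.
            ZipEncoding enc = ZipEncodingHelper.getZipEncoding("x-no-such-charset");
            System.out.println(enc.canEncode("plain-ascii.txt")); // true on any ASCII-compatible default
            // null explicitly requests the platform default encoding.
            ZipEncoding dflt = ZipEncodingHelper.getZipEncoding(null);
            System.out.println(dflt.canEncode("plain-ascii.txt"));
        }
    }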
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipExtraField.java b/src/org/apache/commons/compress/archivers/zip/ZipExtraField.java
deleted file mode 100644
index 2c44b2a52da..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipExtraField.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.util.zip.ZipException;
-
-/**
- * General format of extra field data.
- *
- *
- * <p>Extra fields usually appear twice per file, once in the local
- * file data and once in the central directory. Usually they are the
- * same, but they don't have to be. {@link
- * java.util.zip.ZipOutputStream java.util.zip.ZipOutputStream} will
- * only use the local file data in both places.
- *
- */
-public interface ZipExtraField {
- /**
- * Size of an extra field header (id + length).
- * @since 1.14
- */
- int EXTRAFIELD_HEADER_SIZE = 4;
-
- /**
- * The Header-ID.
- *
- * @return The HeaderId value
- */
- ZipShort getHeaderId();
-
- /**
- * Length of the extra field in the local file data - without
- * Header-ID or length specifier.
- * @return the length of the field in the local file data
- */
- ZipShort getLocalFileDataLength();
-
- /**
- * Length of the extra field in the central directory - without
- * Header-ID or length specifier.
- * @return the length of the field in the central directory
- */
- ZipShort getCentralDirectoryLength();
-
- /**
- * The actual data to put into local file data - without Header-ID
- * or length specifier.
- * @return the data
- */
- byte[] getLocalFileDataData();
-
- /**
- * The actual data to put into central directory - without Header-ID or
- * length specifier.
- * @return the data
- */
- byte[] getCentralDirectoryData();
-
- /**
- * Populate data from this array as if it was in local file data.
- *
- * @param buffer the buffer to read data from
- * @param offset offset into buffer to read data
- * @param length the length of data
- * @throws ZipException on error
- */
- void parseFromLocalFileData(byte[] buffer, int offset, int length)
- throws ZipException;
-
- /**
- * Populate data from this array as if it was in central directory data.
- *
- * @param buffer the buffer to read data from
- * @param offset offset into buffer to read data
- * @param length the length of data
- * @throws ZipException on error
- */
- void parseFromCentralDirectoryData(byte[] buffer, int offset, int length)
- throws ZipException;
-}
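A minimal illustrative implementation of the interface: an opaque field that stores the same raw bytes in the local file data and the central directory. The header ID 0x6C79 is invented for this sketch and does not correspond to any registered extra field:

    import java.util.zip.ZipException;

    import org.apache.commons.compress.archivers.zip.ZipExtraField;
    import org.apache.commons.compress.archivers.zip.ZipShort;

    public class OpaqueExtraField implements ZipExtraField {
        private static final ZipShort HEADER_ID = new ZipShort(0x6C79); // made-up ID
        private byte[] data = new byte[0];

        @Override public ZipShort getHeaderId() { return HEADER_ID; }
        @Override public ZipShort getLocalFileDataLength() { return new ZipShort(data.length); }
        @Override public ZipShort getCentralDirectoryLength() { return getLocalFileDataLength(); }
        @Override public byte[] getLocalFileDataData() { return data.clone(); }
        @Override public byte[] getCentralDirectoryData() { return getLocalFileDataData(); }

        @Override
        public void parseFromLocalFileData(byte[] buffer, int offset, int length) throws ZipException {
            // Keep the payload verbatim; real fields would validate it here.
            data = new byte[length];
            System.arraycopy(buffer, offset, data, 0, length);
        }

        @Override
        public void parseFromCentralDirectoryData(byte[] buffer, int offset, int length) throws ZipException {
            parseFromLocalFileData(buffer, offset, length);
        }
    }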
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipFile.java b/src/org/apache/commons/compress/archivers/zip/ZipFile.java
deleted file mode 100644
index 6beedcb52d2..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipFile.java
+++ /dev/null
@@ -1,1278 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.BufferedInputStream;
-import java.io.ByteArrayInputStream;
-import java.io.Closeable;
-import java.io.EOFException;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.SequenceInputStream;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.channels.SeekableByteChannel;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Enumeration;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.zip.Inflater;
-import java.util.zip.ZipException;
-
-import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-import org.apache.commons.compress.compressors.deflate64.Deflate64CompressorInputStream;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;
-import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC_SHORT;
-
-/**
- * Replacement for java.util.ZipFile.
- *
- *
- * <p>This class adds support for file name encodings other than UTF-8
- * (which is required to work on ZIP files created by native zip tools)
- * and is able to skip a preamble like the one found in self
- * extracting archives. Furthermore it returns instances of
- * org.apache.commons.compress.archivers.zip.ZipArchiveEntry
- * instead of java.util.zip.ZipEntry.
- *
- *
- * <p>It doesn't extend java.util.zip.ZipFile as it would
- * have to reimplement all methods anyway. Like
- * java.util.ZipFile, it uses SeekableByteChannel under the
- * covers and supports compressed and uncompressed entries. As of
- * Apache Commons Compress 1.3 it also transparently supports Zip64
- * extensions and thus individual entries and archives larger than 4
- * GB or with more than 65536 entries.
- *
- *
- * <p>The method signatures mimic the ones of
- * java.util.zip.ZipFile, with a couple of exceptions:
- *
- * <ul>
- * <li>There is no getName method.</li>
- * <li>entries has been renamed to getEntries.</li>
- * <li>getEntries and getEntry return
- * org.apache.commons.compress.archivers.zip.ZipArchiveEntry
- * instances.</li>
- * <li>close is allowed to throw IOException.</li>
- * </ul>
- *
- */
-public class ZipFile implements Closeable {
- private static final int HASH_SIZE = 509;
- static final int NIBLET_MASK = 0x0f;
- static final int BYTE_SHIFT = 8;
- private static final int POS_0 = 0;
- private static final int POS_1 = 1;
- private static final int POS_2 = 2;
- private static final int POS_3 = 3;
- private static final byte[] ONE_ZERO_BYTE = new byte[1];
-
- /**
- * List of entries in the order they appear inside the central
- * directory.
- */
- private final List<ZipArchiveEntry> entries =
- new LinkedList<>();
-
- /**
- * Maps String to list of ZipArchiveEntrys, name -> actual entries.
- */
- private final Map<String, LinkedList<ZipArchiveEntry>> nameMap =
- new HashMap<>(HASH_SIZE);
-
- /**
- * The encoding to use for filenames and the file comment.
- *
- *
- */
- private final String encoding;
-
- /**
- * The zip encoding to use for filenames and the file comment.
- */
- private final ZipEncoding zipEncoding;
-
- /**
- * File name of actual source.
- */
- private final String archiveName;
-
- /**
- * The actual data source.
- */
- private final SeekableByteChannel archive;
-
- /**
- * Whether to look for and use Unicode extra fields.
- */
- private final boolean useUnicodeExtraFields;
-
- /**
- * Whether the file is closed.
- */
- private volatile boolean closed = true;
-
- // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
- private final byte[] dwordBuf = new byte[DWORD];
- private final byte[] wordBuf = new byte[WORD];
- private final byte[] cfhBuf = new byte[CFH_LEN];
- private final byte[] shortBuf = new byte[SHORT];
- private final ByteBuffer dwordBbuf = ByteBuffer.wrap(dwordBuf);
- private final ByteBuffer wordBbuf = ByteBuffer.wrap(wordBuf);
- private final ByteBuffer cfhBbuf = ByteBuffer.wrap(cfhBuf);
-
- /**
- * Opens the given file for reading, assuming "UTF8" for file names.
- *
- * @param f the archive.
- *
- * @throws IOException if an error occurs while reading the file.
- */
- public ZipFile(final File f) throws IOException {
- this(f, ZipEncodingHelper.UTF8);
- }
-
- /**
- * Opens the given file for reading, assuming "UTF8".
- *
- * @param name name of the archive.
- *
- * @throws IOException if an error occurs while reading the file.
- */
- public ZipFile(final String name) throws IOException {
- this(new File(name), ZipEncodingHelper.UTF8);
- }
-
- /**
- * Opens the given file for reading, assuming the specified
- * encoding for file names, scanning unicode extra fields.
- *
- * @param name name of the archive.
- * @param encoding the encoding to use for file names, use null
- * for the platform's default encoding
- *
- * @throws IOException if an error occurs while reading the file.
- */
- public ZipFile(final String name, final String encoding) throws IOException {
- this(new File(name), encoding, true);
- }
-
- /**
- * Opens the given file for reading, assuming the specified
- * encoding for file names and scanning for unicode extra fields.
- *
- * @param f the archive.
- * @param encoding the encoding to use for file names, use null
- * for the platform's default encoding
- *
- * @throws IOException if an error occurs while reading the file.
- */
- public ZipFile(final File f, final String encoding) throws IOException {
- this(f, encoding, true);
- }
-
- /**
- * Opens the given file for reading, assuming the specified
- * encoding for file names.
- *
- * @param f the archive.
- * @param encoding the encoding to use for file names, use null
- * for the platform's default encoding
- * @param useUnicodeExtraFields whether to use InfoZIP Unicode
- * Extra Fields (if present) to set the file names.
- *
- * @throws IOException if an error occurs while reading the file.
- */
- public ZipFile(final File f, final String encoding, final boolean useUnicodeExtraFields)
- throws IOException {
- this(Files.newByteChannel(f.toPath(), EnumSet.of(StandardOpenOption.READ)),
- f.getAbsolutePath(), encoding, useUnicodeExtraFields, true);
- }
-
- /**
- * Opens the given channel for reading, assuming "UTF8" for file names.
- *
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the archive.
- *
- * @throws IOException if an error occurs while reading the file.
- * @since 1.13
- */
- public ZipFile(final SeekableByteChannel channel)
- throws IOException {
- this(channel, "unknown archive", ZipEncodingHelper.UTF8, true);
- }
-
- /**
- * Opens the given channel for reading, assuming the specified
- * encoding for file names.
- *
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the archive.
- * @param encoding the encoding to use for file names, use null
- * for the platform's default encoding
- *
- * @throws IOException if an error occurs while reading the file.
- * @since 1.13
- */
- public ZipFile(final SeekableByteChannel channel, final String encoding)
- throws IOException {
- this(channel, "unknown archive", encoding, true);
- }
-
- /**
- * Opens the given channel for reading, assuming the specified
- * encoding for file names.
- *
- *
- * <p>{@link
- * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
- * allows you to read from an in-memory archive.
- *
- * @param channel the archive.
- * @param archiveName name of the archive, used for error messages only.
- * @param encoding the encoding to use for file names, use null
- * for the platform's default encoding
- * @param useUnicodeExtraFields whether to use InfoZIP Unicode
- * Extra Fields (if present) to set the file names.
- *
- * @throws IOException if an error occurs while reading the file.
- * @since 1.13
- */
- public ZipFile(final SeekableByteChannel channel, final String archiveName,
- final String encoding, final boolean useUnicodeExtraFields)
- throws IOException {
- this(channel, archiveName, encoding, useUnicodeExtraFields, false);
- }
-
- private ZipFile(final SeekableByteChannel channel, final String archiveName,
- final String encoding, final boolean useUnicodeExtraFields,
- final boolean closeOnError)
- throws IOException {
- this.archiveName = archiveName;
- this.encoding = encoding;
- this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
- this.useUnicodeExtraFields = useUnicodeExtraFields;
- archive = channel;
- boolean success = false;
- try {
- final Map<ZipArchiveEntry, NameAndComment> entriesWithoutUTF8Flag =
- populateFromCentralDirectory();
- resolveLocalFileHeaderData(entriesWithoutUTF8Flag);
- success = true;
- } finally {
- closed = !success;
- if (!success && closeOnError) {
- IOUtils.closeQuietly(archive);
- }
- }
- }
-
- /**
- * The encoding to use for filenames and the file comment.
- *
- * @return null if using the platform's default character encoding.
- */
- public String getEncoding() {
- return encoding;
- }
-
- /**
- * Closes the archive.
- * @throws IOException if an error occurs closing the archive.
- */
- @Override
- public void close() throws IOException {
- // this flag is only written here and read in finalize() which
- // can never be run in parallel.
- // no synchronization needed.
- closed = true;
-
- archive.close();
- }
-
- /**
- * Closes a zipfile quietly; throws no IO exception and does nothing
- * on a null parameter.
- * @param zipfile file to close, can be null
- */
- public static void closeQuietly(final ZipFile zipfile) {
- IOUtils.closeQuietly(zipfile);
- }
-
- /**
- * Returns all entries.
- *
- *
- * <p>Entries will be returned in the same order they appear
- * within the archive's central directory.
- *
- * @return all entries as {@link ZipArchiveEntry} instances
- */
- public Enumeration<ZipArchiveEntry> getEntries() {
- return Collections.enumeration(entries);
- }
-
- /**
- * Returns all entries in physical order.
- *
- *
- * <p>Entries will be returned in the same order their contents
- * appear within the archive.
- *
- * @return all entries as {@link ZipArchiveEntry} instances
- *
- * @since 1.1
- */
- public Enumeration<ZipArchiveEntry> getEntriesInPhysicalOrder() {
- final ZipArchiveEntry[] allEntries = entries.toArray(new ZipArchiveEntry[entries.size()]);
- Arrays.sort(allEntries, offsetComparator);
- return Collections.enumeration(Arrays.asList(allEntries));
- }
-
- /**
- * Returns a named entry - or {@code null} if no entry by
- * that name exists.
- *
- *
- * <p>If multiple entries with the same name exist the first entry
- * in the archive's central directory by that name is
- * returned.
- *
- * @param name name of the entry.
- * @return the ZipArchiveEntry corresponding to the given name - or
- * {@code null} if not present.
- */
- public ZipArchiveEntry getEntry(final String name) {
- final LinkedList<ZipArchiveEntry> entriesOfThatName = nameMap.get(name);
- return entriesOfThatName != null ? entriesOfThatName.getFirst() : null;
- }
-
- /**
- * Returns all named entries in the same order they appear within
- * the archive's central directory.
- *
- * @param name name of the entry.
- * @return the Iterable<ZipArchiveEntry> corresponding to the
- * given name
- * @since 1.6
- */
- public Iterable<ZipArchiveEntry> getEntries(final String name) {
- final List<ZipArchiveEntry> entriesOfThatName = nameMap.get(name);
- return entriesOfThatName != null ? entriesOfThatName
- : Collections.emptyList();
- }
-
- /**
- * Returns all named entries in the same order their contents
- * appear within the archive.
- *
- * @param name name of the entry.
- * @return the Iterable<ZipArchiveEntry> corresponding to the
- * given name
- * @since 1.6
- */
- public Iterable<ZipArchiveEntry> getEntriesInPhysicalOrder(final String name) {
- ZipArchiveEntry[] entriesOfThatName = new ZipArchiveEntry[0];
- if (nameMap.containsKey(name)) {
- entriesOfThatName = nameMap.get(name).toArray(entriesOfThatName);
- Arrays.sort(entriesOfThatName, offsetComparator);
- }
- return Arrays.asList(entriesOfThatName);
- }
-
- /**
- * Whether this class is able to read the given entry.
- *
- *
- * <p>May return false if it is set up to use encryption or a
- * compression method that hasn't been implemented yet.
- * @since 1.1
- * @param ze the entry
- * @return whether this class is able to read the given entry.
- */
- public boolean canReadEntryData(final ZipArchiveEntry ze) {
- return ZipUtil.canHandleEntryData(ze);
- }
-
- /**
- * Expose the raw stream of the archive entry (compressed form).
- *
- *
- * <p>This method does not relate to how/if we understand the payload in the
- * stream, since we really only intend to move it on to somewhere else.
- *
- * @param ze The entry to get the stream for
- * @return The raw input stream containing (possibly) compressed data.
- * @since 1.11
- */
- public InputStream getRawInputStream(final ZipArchiveEntry ze) {
- if (!(ze instanceof Entry)) {
- return null;
- }
- final long start = ze.getDataOffset();
- return createBoundedInputStream(start, ze.getCompressedSize());
- }
-
-
- /**
- * Transfer selected entries from this zipfile to a given #ZipArchiveOutputStream.
- * Compression and all other attributes will be as in this file.
- *
This method transfers entries based on the central directory of the zip file.
- *
- * @param target The zipArchiveOutputStream to write the entries to
- * @param predicate A predicate that selects which entries to write
- * @throws IOException on error
- */
- public void copyRawEntries(final ZipArchiveOutputStream target, final ZipArchiveEntryPredicate predicate)
- throws IOException {
- final Enumeration<ZipArchiveEntry> src = getEntriesInPhysicalOrder();
- while (src.hasMoreElements()) {
- final ZipArchiveEntry entry = src.nextElement();
- if (predicate.test(entry)) {
- target.addRawArchiveEntry(entry, getRawInputStream(entry));
- }
- }
- }
-
- /**
- * Returns an InputStream for reading the contents of the given entry.
- *
- * @param ze the entry to get the stream for.
- * @return a stream to read the entry from. The returned stream
- * implements {@link InputStreamStatistics}.
- * @throws IOException if unable to create an input stream from the zipentry
- */
- public InputStream getInputStream(final ZipArchiveEntry ze)
- throws IOException {
- if (!(ze instanceof Entry)) {
- return null;
- }
- // cast validity is checked just above
- ZipUtil.checkRequestedFeatures(ze);
- final long start = ze.getDataOffset();
-
- // doesn't get closed if the method is not supported - which
- // should never happen because of the checkRequestedFeatures
- // call above
- final InputStream is =
- new BufferedInputStream(createBoundedInputStream(start, ze.getCompressedSize())); //NOSONAR
- switch (ZipMethod.getMethodByCode(ze.getMethod())) {
- case STORED:
- return new StoredStatisticsStream(is);
- case UNSHRINKING:
- return new UnshrinkingInputStream(is);
- case IMPLODING:
- return new ExplodingInputStream(ze.getGeneralPurposeBit().getSlidingDictionarySize(),
- ze.getGeneralPurposeBit().getNumberOfShannonFanoTrees(), is);
- case DEFLATED:
- final Inflater inflater = new Inflater(true);
- // Inflater with nowrap=true has this odd contract for a zero padding
- // byte following the data stream; this used to be zlib's requirement
- // and has been fixed a long time ago, but the contract persists so
- // we comply.
- // https://docs.oracle.com/javase/7/docs/api/java/util/zip/Inflater.html#Inflater(boolean)
- return new InflaterInputStreamWithStatistics(new SequenceInputStream(is, new ByteArrayInputStream(ONE_ZERO_BYTE)),
- inflater) {
- @Override
- public void close() throws IOException {
- try {
- super.close();
- } finally {
- inflater.end();
- }
- }
- };
- case BZIP2:
- return new BZip2CompressorInputStream(is);
- case ENHANCED_DEFLATED:
- return new Deflate64CompressorInputStream(is);
- case AES_ENCRYPTED:
- case EXPANDING_LEVEL_1:
- case EXPANDING_LEVEL_2:
- case EXPANDING_LEVEL_3:
- case EXPANDING_LEVEL_4:
- case JPEG:
- case LZMA:
- case PKWARE_IMPLODING:
- case PPMD:
- case TOKENIZATION:
- case UNKNOWN:
- case WAVPACK:
- case XZ:
- default:
- throw new ZipException("Found unsupported compression method "
- + ze.getMethod());
- }
- }
-
- /**
- *
- * Convenience method to return the entry's content as a String if isUnixSymlink()
- * returns true for it, otherwise returns null.
- *
- *
- *
- * <p>This method assumes the symbolic link's file name uses the
- * same encoding that has been specified for this ZipFile.
- *
- * @param entry ZipArchiveEntry object that represents the symbolic link
- * @return entry's content as a String
- * @throws IOException problem with content's input stream
- * @since 1.5
- */
- public String getUnixSymlink(final ZipArchiveEntry entry) throws IOException {
- if (entry != null && entry.isUnixSymlink()) {
- try (InputStream in = getInputStream(entry)) {
- return zipEncoding.decode(IOUtils.toByteArray(in));
- }
- }
- return null;
- }
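Sketch of symlink resolution (entry name hypothetical, zf as in the previous sketch):

    ZipArchiveEntry link = zf.getEntry("bin/latest");
    String target = zf.getUnixSymlink(link); // null unless link.isUnixSymlink()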
-
- /**
- * Ensures that the close method of this zipfile is called when
- * there are no more references to it.
- * @see #close()
- */
- @Override
- protected void finalize() throws Throwable {
- try {
- if (!closed) {
- System.err.println("Cleaning up unclosed ZipFile for archive "
- + archiveName);
- close();
- }
- } finally {
- super.finalize();
- }
- }
-
- /**
- * Length of a "central directory" entry structure without file
- * name, extra fields or comment.
- */
- private static final int CFH_LEN =
- /* version made by */ SHORT
- /* version needed to extract */ + SHORT
- /* general purpose bit flag */ + SHORT
- /* compression method */ + SHORT
- /* last mod file time */ + SHORT
- /* last mod file date */ + SHORT
- /* crc-32 */ + WORD
- /* compressed size */ + WORD
- /* uncompressed size */ + WORD
- /* filename length */ + SHORT
- /* extra field length */ + SHORT
- /* file comment length */ + SHORT
- /* disk number start */ + SHORT
- /* internal file attributes */ + SHORT
- /* external file attributes */ + WORD
- /* relative offset of local header */ + WORD;
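With SHORT = 2 and WORD = 4 from ZipConstants this works out to 11 * 2 + 5 * 4 = 42 bytes, i.e. the 46-byte fixed part of a central file header minus the four signature bytes, which are read separately.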
-
- private static final long CFH_SIG =
- ZipLong.getValue(ZipArchiveOutputStream.CFH_SIG);
-
- /**
- * Reads the central directory of the given archive and populates
- * the internal tables with ZipArchiveEntry instances.
- *
- *
- * The ZipArchiveEntrys will know all data that can be obtained from
- * the central directory alone, but not the data that requires the
- * local file header or additional data to be read.
- *
- * @return a map of zipentries that didn't have the language
- * encoding flag set when read.
- */
- private Map<ZipArchiveEntry, NameAndComment> populateFromCentralDirectory()
- throws IOException {
- final HashMap<ZipArchiveEntry, NameAndComment> noUTF8Flag =
- new HashMap<>();
-
- positionAtCentralDirectory();
-
- wordBbuf.rewind();
- IOUtils.readFully(archive, wordBbuf);
- long sig = ZipLong.getValue(wordBuf);
-
- if (sig != CFH_SIG && startsWithLocalFileHeader()) {
- throw new IOException("central directory is empty, can't expand"
- + " corrupt archive.");
- }
-
- while (sig == CFH_SIG) {
- readCentralDirectoryEntry(noUTF8Flag);
- wordBbuf.rewind();
- IOUtils.readFully(archive, wordBbuf);
- sig = ZipLong.getValue(wordBuf);
- }
- return noUTF8Flag;
- }
-
- /**
- * Reads an individual entry of the central directory, creates a
- * ZipArchiveEntry from it and adds it to the global maps.
- *
- * @param noUTF8Flag map used to collect entries that don't have
- * their UTF-8 flag set and whose name will be set by data read
- * from the local file header later. The current entry may be
- * added to this map.
- */
- private void
- readCentralDirectoryEntry(final Map<ZipArchiveEntry, NameAndComment> noUTF8Flag)
- throws IOException {
- cfhBbuf.rewind();
- IOUtils.readFully(archive, cfhBbuf);
- int off = 0;
- final Entry ze = new Entry();
-
- final int versionMadeBy = ZipShort.getValue(cfhBuf, off);
- off += SHORT;
- ze.setVersionMadeBy(versionMadeBy);
- ze.setPlatform((versionMadeBy >> BYTE_SHIFT) & NIBLET_MASK);
-
- ze.setVersionRequired(ZipShort.getValue(cfhBuf, off));
- off += SHORT; // version required
-
- final GeneralPurposeBit gpFlag = GeneralPurposeBit.parse(cfhBuf, off);
- final boolean hasUTF8Flag = gpFlag.usesUTF8ForNames();
- final ZipEncoding entryEncoding =
- hasUTF8Flag ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
- if (hasUTF8Flag) {
- ze.setNameSource(ZipArchiveEntry.NameSource.NAME_WITH_EFS_FLAG);
- }
- ze.setGeneralPurposeBit(gpFlag);
- ze.setRawFlag(ZipShort.getValue(cfhBuf, off));
-
- off += SHORT;
-
- //noinspection MagicConstant
- ze.setMethod(ZipShort.getValue(cfhBuf, off));
- off += SHORT;
-
- final long time = ZipUtil.dosToJavaTime(ZipLong.getValue(cfhBuf, off));
- ze.setTime(time);
- off += WORD;
-
- ze.setCrc(ZipLong.getValue(cfhBuf, off));
- off += WORD;
-
- ze.setCompressedSize(ZipLong.getValue(cfhBuf, off));
- off += WORD;
-
- ze.setSize(ZipLong.getValue(cfhBuf, off));
- off += WORD;
-
- final int fileNameLen = ZipShort.getValue(cfhBuf, off);
- off += SHORT;
-
- final int extraLen = ZipShort.getValue(cfhBuf, off);
- off += SHORT;
-
- final int commentLen = ZipShort.getValue(cfhBuf, off);
- off += SHORT;
-
- final int diskStart = ZipShort.getValue(cfhBuf, off);
- off += SHORT;
-
- ze.setInternalAttributes(ZipShort.getValue(cfhBuf, off));
- off += SHORT;
-
- ze.setExternalAttributes(ZipLong.getValue(cfhBuf, off));
- off += WORD;
-
- final byte[] fileName = new byte[fileNameLen];
- IOUtils.readFully(archive, ByteBuffer.wrap(fileName));
- ze.setName(entryEncoding.decode(fileName), fileName);
-
- // LFH offset,
- ze.setLocalHeaderOffset(ZipLong.getValue(cfhBuf, off));
- // data offset will be filled later
- entries.add(ze);
-
- final byte[] cdExtraData = new byte[extraLen];
- IOUtils.readFully(archive, ByteBuffer.wrap(cdExtraData));
- ze.setCentralDirectoryExtra(cdExtraData);
-
- setSizesAndOffsetFromZip64Extra(ze, diskStart);
-
- final byte[] comment = new byte[commentLen];
- IOUtils.readFully(archive, ByteBuffer.wrap(comment));
- ze.setComment(entryEncoding.decode(comment));
-
- if (!hasUTF8Flag && useUnicodeExtraFields) {
- noUTF8Flag.put(ze, new NameAndComment(fileName, comment));
- }
- }
-
- /**
- * If the entry holds a Zip64 extended information extra field,
- * read sizes from there if the entry's sizes are set to
- * 0xFFFFFFFF, do the same for the offset of the local file
- * header.
- *
- *
- * Ensures the Zip64 extra either knows both compressed and
- * uncompressed size or neither of them, as the internal logic in
- * ExtraFieldUtils forces the field to create local header data
- * even if they are never used - and here a field with only one
- * size would be invalid.
- */
- private void setSizesAndOffsetFromZip64Extra(final ZipArchiveEntry ze,
- final int diskStart)
- throws IOException {
- final Zip64ExtendedInformationExtraField z64 =
- (Zip64ExtendedInformationExtraField)
- ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
- if (z64 != null) {
- final boolean hasUncompressedSize = ze.getSize() == ZIP64_MAGIC;
- final boolean hasCompressedSize = ze.getCompressedSize() == ZIP64_MAGIC;
- final boolean hasRelativeHeaderOffset =
- ze.getLocalHeaderOffset() == ZIP64_MAGIC;
- z64.reparseCentralDirectoryData(hasUncompressedSize,
- hasCompressedSize,
- hasRelativeHeaderOffset,
- diskStart == ZIP64_MAGIC_SHORT);
-
- if (hasUncompressedSize) {
- ze.setSize(z64.getSize().getLongValue());
- } else if (hasCompressedSize) {
- z64.setSize(new ZipEightByteInteger(ze.getSize()));
- }
-
- if (hasCompressedSize) {
- ze.setCompressedSize(z64.getCompressedSize().getLongValue());
- } else if (hasUncompressedSize) {
- z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
- }
-
- if (hasRelativeHeaderOffset) {
- ze.setLocalHeaderOffset(z64.getRelativeHeaderOffset().getLongValue());
- }
- }
- }
-
- /**
- * Length of the "End of central directory record" - which is
- * supposed to be the last structure of the archive - without file
- * comment.
- */
- static final int MIN_EOCD_SIZE =
- /* end of central dir signature */ WORD
- /* number of this disk */ + SHORT
- /* number of the disk with the */
- /* start of the central directory */ + SHORT
- /* total number of entries in */
- /* the central dir on this disk */ + SHORT
- /* total number of entries in */
- /* the central dir */ + SHORT
- /* size of the central directory */ + WORD
- /* offset of start of central */
- /* directory with respect to */
- /* the starting disk number */ + WORD
- /* zipfile comment length */ + SHORT;
-
- /**
- * Maximum length of the "End of central directory record" with a
- * file comment.
- */
- private static final int MAX_EOCD_SIZE = MIN_EOCD_SIZE
- /* maximum length of zipfile comment */ + ZIP64_MAGIC_SHORT;
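With SHORT = 2 and WORD = 4 this makes MIN_EOCD_SIZE 22 bytes; since the comment length is a 16-bit field, MAX_EOCD_SIZE is 22 + 65535 = 65557 bytes, which bounds the backwards search in tryToLocateSignature below.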
-
- /**
- * Offset of the field that holds the location of the first
- * central directory entry inside the "End of central directory
- * record" relative to the start of the "End of central directory
- * record".
- */
- private static final int CFD_LOCATOR_OFFSET =
- /* end of central dir signature */ WORD
- /* number of this disk */ + SHORT
- /* number of the disk with the */
- /* start of the central directory */ + SHORT
- /* total number of entries in */
- /* the central dir on this disk */ + SHORT
- /* total number of entries in */
- /* the central dir */ + SHORT
- /* size of the central directory */ + WORD;
-
- /**
- * Length of the "Zip64 end of central directory locator" - which
- * should be right in front of the "end of central directory
- * record" if one is present at all.
- */
- private static final int ZIP64_EOCDL_LENGTH =
- /* zip64 end of central dir locator sig */ WORD
- /* number of the disk with the start */
- /* start of the zip64 end of */
- /* central directory */ + WORD
- /* relative offset of the zip64 */
- /* end of central directory record */ + DWORD
- /* total number of disks */ + WORD;
-
- /**
- * Offset of the field that holds the location of the "Zip64 end
- * of central directory record" inside the "Zip64 end of central
- * directory locator" relative to the start of the "Zip64 end of
- * central directory locator".
- */
- private static final int ZIP64_EOCDL_LOCATOR_OFFSET =
- /* zip64 end of central dir locator sig */ WORD
- /* number of the disk with the start */
- /* start of the zip64 end of */
- /* central directory */ + WORD;
-
- /**
- * Offset of the field that holds the location of the first
- * central directory entry inside the "Zip64 end of central
- * directory record" relative to the start of the "Zip64 end of
- * central directory record".
- */
- private static final int ZIP64_EOCD_CFD_LOCATOR_OFFSET =
- /* zip64 end of central dir */
- /* signature */ WORD
- /* size of zip64 end of central */
- /* directory record */ + DWORD
- /* version made by */ + SHORT
- /* version needed to extract */ + SHORT
- /* number of this disk */ + WORD
- /* number of the disk with the */
- /* start of the central directory */ + WORD
- /* total number of entries in the */
- /* central directory on this disk */ + DWORD
- /* total number of entries in the */
- /* central directory */ + DWORD
- /* size of the central directory */ + DWORD;
-
- /**
- * Searches for either the "Zip64 end of central directory
- * locator" or the "End of central dir record", parses
- * it and positions the stream at the first central directory
- * record.
- */
- private void positionAtCentralDirectory()
- throws IOException {
- positionAtEndOfCentralDirectoryRecord();
- boolean found = false;
- final boolean searchedForZip64EOCD =
- archive.position() > ZIP64_EOCDL_LENGTH;
- if (searchedForZip64EOCD) {
- archive.position(archive.position() - ZIP64_EOCDL_LENGTH);
- wordBbuf.rewind();
- IOUtils.readFully(archive, wordBbuf);
- found = Arrays.equals(ZipArchiveOutputStream.ZIP64_EOCD_LOC_SIG,
- wordBuf);
- }
- if (!found) {
- // not a ZIP64 archive
- if (searchedForZip64EOCD) {
- skipBytes(ZIP64_EOCDL_LENGTH - WORD);
- }
- positionAtCentralDirectory32();
- } else {
- positionAtCentralDirectory64();
- }
- }
-
- /**
- * Parses the "Zip64 end of central directory locator",
- * finds the "Zip64 end of central directory record" using the
- * parsed information, parses that and positions the stream at the
- * first central directory record.
- *
- * Expects stream to be positioned right behind the "Zip64
- * end of central directory locator"'s signature.
- */
- private void positionAtCentralDirectory64()
- throws IOException {
- skipBytes(ZIP64_EOCDL_LOCATOR_OFFSET
- - WORD /* signature has already been read */);
- dwordBbuf.rewind();
- IOUtils.readFully(archive, dwordBbuf);
- archive.position(ZipEightByteInteger.getLongValue(dwordBuf));
- wordBbuf.rewind();
- IOUtils.readFully(archive, wordBbuf);
- if (!Arrays.equals(wordBuf, ZipArchiveOutputStream.ZIP64_EOCD_SIG)) {
- throw new ZipException("archive's ZIP64 end of central "
- + "directory locator is corrupt.");
- }
- skipBytes(ZIP64_EOCD_CFD_LOCATOR_OFFSET
- - WORD /* signature has already been read */);
- dwordBbuf.rewind();
- IOUtils.readFully(archive, dwordBbuf);
- archive.position(ZipEightByteInteger.getLongValue(dwordBuf));
- }
-
- /**
- * Parses the "End of central dir record" and positions
- * the stream at the first central directory record.
- *
- * Expects stream to be positioned at the beginning of the
- * "End of central dir record".
- */
- private void positionAtCentralDirectory32()
- throws IOException {
- skipBytes(CFD_LOCATOR_OFFSET);
- wordBbuf.rewind();
- IOUtils.readFully(archive, wordBbuf);
- archive.position(ZipLong.getValue(wordBuf));
- }
-
- /**
- * Searches for and positions the stream at the start of the
- * "End of central dir record".
- */
- private void positionAtEndOfCentralDirectoryRecord()
- throws IOException {
- final boolean found = tryToLocateSignature(MIN_EOCD_SIZE, MAX_EOCD_SIZE,
- ZipArchiveOutputStream.EOCD_SIG);
- if (!found) {
- throw new ZipException("archive is not a ZIP archive");
- }
- }
-
- /**
- * Searches the archive backwards from minDistance to maxDistance
- * for the given signature, positions the archive channel right
- * at the signature if it has been found.
- */
- private boolean tryToLocateSignature(final long minDistanceFromEnd,
- final long maxDistanceFromEnd,
- final byte[] sig) throws IOException {
- boolean found = false;
- long off = archive.size() - minDistanceFromEnd;
- final long stopSearching =
- Math.max(0L, archive.size() - maxDistanceFromEnd);
- if (off >= 0) {
- for (; off >= stopSearching; off--) {
- archive.position(off);
- try {
- wordBbuf.rewind();
- IOUtils.readFully(archive, wordBbuf);
- wordBbuf.flip();
- } catch (EOFException ex) {
- break;
- }
- int curr = wordBbuf.get();
- if (curr == sig[POS_0]) {
- curr = wordBbuf.get();
- if (curr == sig[POS_1]) {
- curr = wordBbuf.get();
- if (curr == sig[POS_2]) {
- curr = wordBbuf.get();
- if (curr == sig[POS_3]) {
- found = true;
- break;
- }
- }
- }
- }
- }
- }
- if (found) {
- archive.position(off);
- }
- return found;
- }
-
- /**
- * Skips the given number of bytes or throws an EOFException if
- * skipping failed.
- */
- private void skipBytes(final int count) throws IOException {
- long currentPosition = archive.position();
- long newPosition = currentPosition + count;
- if (newPosition > archive.size()) {
- throw new EOFException();
- }
- archive.position(newPosition);
- }
-
- /**
- * Number of bytes in local file header up to the "length of
- * filename" entry.
- */
- private static final long LFH_OFFSET_FOR_FILENAME_LENGTH =
- /* local file header signature */ WORD
- /* version needed to extract */ + SHORT
- /* general purpose bit flag */ + SHORT
- /* compression method */ + SHORT
- /* last mod file time */ + SHORT
- /* last mod file date */ + SHORT
- /* crc-32 */ + WORD
- /* compressed size */ + WORD
- /* uncompressed size */ + (long) WORD;
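That is 4 + 5 * 2 + 3 * 4 = 26 bytes, the fixed offset of the "file name length" field inside every local file header; resolveLocalFileHeaderData below seeks straight to it for each entry.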
-
- /**
- * Walks through all recorded entries and adds the data available
- * from the local file header.
- *
- *
- * Also records the offsets for the data to read from the
- * entries.
- */
- private void resolveLocalFileHeaderData(final Map<ZipArchiveEntry, NameAndComment>
- entriesWithoutUTF8Flag)
- throws IOException {
- for (final ZipArchiveEntry zipArchiveEntry : entries) {
- // entries is filled in populateFromCentralDirectory and
- // never modified
- final Entry ze = (Entry) zipArchiveEntry;
- final long offset = ze.getLocalHeaderOffset();
- archive.position(offset + LFH_OFFSET_FOR_FILENAME_LENGTH);
- wordBbuf.rewind();
- IOUtils.readFully(archive, wordBbuf);
- wordBbuf.flip();
- wordBbuf.get(shortBuf);
- final int fileNameLen = ZipShort.getValue(shortBuf);
- wordBbuf.get(shortBuf);
- final int extraFieldLen = ZipShort.getValue(shortBuf);
- skipBytes(fileNameLen);
- final byte[] localExtraData = new byte[extraFieldLen];
- IOUtils.readFully(archive, ByteBuffer.wrap(localExtraData));
- ze.setExtra(localExtraData);
- ze.setDataOffset(offset + LFH_OFFSET_FOR_FILENAME_LENGTH
- + SHORT + SHORT + fileNameLen + extraFieldLen);
- ze.setStreamContiguous(true);
-
- if (entriesWithoutUTF8Flag.containsKey(ze)) {
- final NameAndComment nc = entriesWithoutUTF8Flag.get(ze);
- ZipUtil.setNameAndCommentFromExtraFields(ze, nc.name,
- nc.comment);
- }
-
- final String name = ze.getName();
- LinkedList<ZipArchiveEntry> entriesOfThatName = nameMap.get(name);
- if (entriesOfThatName == null) {
- entriesOfThatName = new LinkedList<>();
- nameMap.put(name, entriesOfThatName);
- }
- entriesOfThatName.addLast(ze);
- }
- }
-
- /**
- * Checks whether the archive starts with a LFH. If it doesn't,
- * it may be an empty archive.
- */
- private boolean startsWithLocalFileHeader() throws IOException {
- archive.position(0);
- wordBbuf.rewind();
- IOUtils.readFully(archive, wordBbuf);
- return Arrays.equals(wordBuf, ZipArchiveOutputStream.LFH_SIG);
- }
-
- /**
- * Creates a new BoundedInputStream, depending on the implementation
- * of the underlying archive channel.
- */
- private BoundedInputStream createBoundedInputStream(long start, long remaining) {
- return archive instanceof FileChannel ?
- new BoundedFileChannelInputStream(start, remaining) :
- new BoundedInputStream(start, remaining);
- }
-
- /**
- * InputStream that delegates requests to the underlying
- * SeekableByteChannel, making sure that only bytes from a certain
- * range can be read.
- */
- private class BoundedInputStream extends InputStream {
- private ByteBuffer singleByteBuffer;
- private final long end;
- private long loc;
-
- BoundedInputStream(final long start, final long remaining) {
- this.end = start+remaining;
- if (this.end < start) {
- // check for potential vulnerability due to overflow
- throw new IllegalArgumentException("Invalid length of stream at offset="+start+", length="+remaining);
- }
- loc = start;
- }
-
- @Override
- public synchronized int read() throws IOException {
- if (loc >= end) {
- return -1;
- }
- if (singleByteBuffer == null) {
- singleByteBuffer = ByteBuffer.allocate(1);
- }
- else {
- singleByteBuffer.rewind();
- }
- int read = read(loc, singleByteBuffer);
- if (read < 0) {
- return read;
- }
- loc++;
- return singleByteBuffer.get() & 0xff;
- }
-
- @Override
- public synchronized int read(final byte[] b, final int off, int len) throws IOException {
- if (len <= 0) {
- return 0;
- }
-
- if (len > end-loc) {
- if (loc >= end) {
- return -1;
- }
- len = (int)(end-loc);
- }
-
- ByteBuffer buf;
- buf = ByteBuffer.wrap(b, off, len);
- int ret = read(loc, buf);
- if (ret > 0) {
- loc += ret;
- return ret;
- }
- return ret;
- }
-
- protected int read(long pos, ByteBuffer buf) throws IOException {
- int read;
- synchronized (archive) {
- archive.position(pos);
- read = archive.read(buf);
- }
- buf.flip();
- return read;
- }
- }
-
- /**
- * Lock-free implementation of BoundedInputStream. The
- * implementation uses positioned reads on the underlying archive
- * file channel and therefore performs significantly faster in
- * concurrent environments.
- */
- private class BoundedFileChannelInputStream extends BoundedInputStream {
- private final FileChannel archive;
-
- BoundedFileChannelInputStream(final long start, final long remaining) {
- super(start, remaining);
- archive = (FileChannel)ZipFile.this.archive;
- }
-
- @Override
- protected int read(long pos, ByteBuffer buf) throws IOException {
- int read = archive.read(buf, pos);
- buf.flip();
- return read;
- }
- }
-
- private static final class NameAndComment {
- private final byte[] name;
- private final byte[] comment;
- private NameAndComment(final byte[] name, final byte[] comment) {
- this.name = name;
- this.comment = comment;
- }
- }
-
- /**
- * Compares two ZipArchiveEntries based on their offset within the archive.
- *
- *
- * Won't return any meaningful results if one of the entries
- * isn't part of the archive at all.
- *
- * @since 1.1
- */
- private final Comparator<ZipArchiveEntry> offsetComparator =
- new Comparator<ZipArchiveEntry>() {
- @Override
- public int compare(final ZipArchiveEntry e1, final ZipArchiveEntry e2) {
- if (e1 == e2) {
- return 0;
- }
-
- final Entry ent1 = e1 instanceof Entry ? (Entry) e1 : null;
- final Entry ent2 = e2 instanceof Entry ? (Entry) e2 : null;
- if (ent1 == null) {
- return 1;
- }
- if (ent2 == null) {
- return -1;
- }
- final long val = (ent1.getLocalHeaderOffset()
- - ent2.getLocalHeaderOffset());
- return val == 0 ? 0 : val < 0 ? -1 : +1;
- }
- };
-
- /**
- * Extends ZipArchiveEntry to store the offset within the archive.
- */
- private static class Entry extends ZipArchiveEntry {
-
- Entry() {
- }
-
- @Override
- public int hashCode() {
- return 3 * super.hashCode()
- + (int) getLocalHeaderOffset()+(int)(getLocalHeaderOffset()>>32);
- }
-
- @Override
- public boolean equals(final Object other) {
- if (super.equals(other)) {
- // super.equals would return false if other were not an Entry
- final Entry otherEntry = (Entry) other;
- return getLocalHeaderOffset()
- == otherEntry.getLocalHeaderOffset()
- && getDataOffset()
- == otherEntry.getDataOffset();
- }
- return false;
- }
- }
-
- private static class StoredStatisticsStream extends CountingInputStream implements InputStreamStatistics {
- StoredStatisticsStream(InputStream in) {
- super(in);
- }
-
- @Override
- public long getCompressedCount() {
- return super.getBytesRead();
- }
-
- @Override
- public long getUncompressedCount() {
- return getCompressedCount();
- }
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipLong.java b/src/org/apache/commons/compress/archivers/zip/ZipLong.java
deleted file mode 100644
index 6046c61da7b..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipLong.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import org.apache.commons.compress.utils.ByteUtils;
-
-import java.io.Serializable;
-
-import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
-
-/**
- * Utility class that represents a four byte integer with conversion
- * rules for the little endian byte order of ZIP files.
- * @Immutable
- */
-public final class ZipLong implements Cloneable, Serializable {
- private static final long serialVersionUID = 1L;
-
- private final long value;
-
- /** Central File Header Signature */
- public static final ZipLong CFH_SIG = new ZipLong(0X02014B50L);
-
- /** Local File Header Signature */
- public static final ZipLong LFH_SIG = new ZipLong(0X04034B50L);
-
- /**
- * Data Descriptor signature.
- *
- *
- * Actually, PKWARE uses this as marker for split/spanned
- * archives and other archivers have started to use it as Data
- * Descriptor signature (as well).
- * @since 1.1
- */
- public static final ZipLong DD_SIG = new ZipLong(0X08074B50L);
-
- /**
- * Value stored in size and similar fields if ZIP64 extensions are
- * used.
- * @since 1.3
- */
- static final ZipLong ZIP64_MAGIC = new ZipLong(ZipConstants.ZIP64_MAGIC);
-
- /**
- * Marks ZIP archives that were supposed to be split or spanned
- * but only needed a single segment in the end (so are actually
- * neither split nor spanned).
- *
- *
This is the "PK00" prefix found in some archives.
- * @since 1.5
- */
- public static final ZipLong SINGLE_SEGMENT_SPLIT_MARKER =
- new ZipLong(0X30304B50L);
-
- /**
- * Archive extra data record signature.
- * @since 1.5
- */
- public static final ZipLong AED_SIG = new ZipLong(0X08064B50L);
-
- /**
- * Create instance from a number.
- * @param value the long to store as a ZipLong
- */
- public ZipLong(final long value) {
- this.value = value;
- }
-
- /**
- * create instance from a java int.
- * @param value the int to store as a ZipLong
- * @since 1.15
- */
- public ZipLong(int value) {
- this.value = value;
- }
-
- /**
- * Create instance from bytes.
- * @param bytes the bytes to store as a ZipLong
- */
- public ZipLong (final byte[] bytes) {
- this(bytes, 0);
- }
-
- /**
- * Create instance from the four bytes starting at offset.
- * @param bytes the bytes to store as a ZipLong
- * @param offset the offset to start
- */
- public ZipLong (final byte[] bytes, final int offset) {
- value = ZipLong.getValue(bytes, offset);
- }
-
- /**
- * Get value as four bytes in little endian byte order.
- * @return value as four bytes in little endian order
- */
- public byte[] getBytes() {
- return ZipLong.getBytes(value);
- }
-
- /**
- * Get value as Java long.
- * @return value as a long
- */
- public long getValue() {
- return value;
- }
-
- /**
- * Get value as a (signed) java int
- * @return value as int
- * @since 1.15
- */
- public int getIntValue() { return (int)value;}
-
- /**
- * Get value as four bytes in little endian byte order.
- * @param value the value to convert
- * @return value as four bytes in little endian byte order
- */
- public static byte[] getBytes(final long value) {
- final byte[] result = new byte[WORD];
- putLong(value, result, 0);
- return result;
- }
-
- /**
- * Put the value as four bytes in little endian byte order.
- * @param value the Java long to convert to bytes
- * @param buf the output buffer
- * @param offset
- * The offset within the output buffer of the first byte to be written.
- * must be non-negative and no larger than buf.length-4
- */
-
- public static void putLong(final long value, final byte[] buf, int offset) {
- ByteUtils.toLittleEndian(buf, value, offset, 4);
- }
-
- public void putLong(final byte[] buf, final int offset) {
- putLong(value, buf, offset);
- }
-
- /**
- * Helper method to get the value as a Java long from four bytes starting at given array offset
- * @param bytes the array of bytes
- * @param offset the offset to start
- * @return the corresponding Java long value
- */
- public static long getValue(final byte[] bytes, final int offset) {
- return ByteUtils.fromLittleEndian(bytes, offset, 4);
- }
-
- /**
- * Helper method to get the value as a Java long from a four-byte array
- * @param bytes the array of bytes
- * @return the corresponding Java long value
- */
- public static long getValue(final byte[] bytes) {
- return getValue(bytes, 0);
- }
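A sketch of the little endian round trip, using the LFH_SIG constant declared above:

    byte[] b = ZipLong.LFH_SIG.getBytes(); // {0x50, 0x4B, 0x03, 0x04}, i.e. "PK\3\4"
    long v = ZipLong.getValue(b);          // 0x04034B50L again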
-
- /**
- * Override to make two instances with same value equal.
- * @param o an object to compare
- * @return true if the objects are equal
- */
- @Override
- public boolean equals(final Object o) {
- if (o == null || !(o instanceof ZipLong)) {
- return false;
- }
- return value == ((ZipLong) o).getValue();
- }
-
- /**
- * Override to make two instances with same value equal.
- * @return the value stored in the ZipLong
- */
- @Override
- public int hashCode() {
- return (int) value;
- }
-
- @Override
- public Object clone() {
- try {
- return super.clone();
- } catch (final CloneNotSupportedException cnfe) {
- // impossible
- throw new RuntimeException(cnfe); //NOSONAR
- }
- }
-
- @Override
- public String toString() {
- return "ZipLong value: " + value;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipMethod.java b/src/org/apache/commons/compress/archivers/zip/ZipMethod.java
deleted file mode 100644
index 0c9112d7b89..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipMethod.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.zip.ZipEntry;
-
-/**
- * List of known compression methods
- *
- * Many of these methods are currently not supported by commons compress
- *
- * @since 1.5
- */
-public enum ZipMethod {
-
- /**
- * Compression method 0 for uncompressed entries.
- *
- * @see ZipEntry#STORED
- */
- STORED(ZipEntry.STORED),
-
- /**
- * UnShrinking.
- * dynamic Lempel-Ziv-Welch-Algorithm
- *
- * @see Explanation of fields: compression
- * method: (2 bytes)
- */
- UNSHRINKING(1),
-
- /**
- * Reduced with compression factor 1.
- *
- * @see Explanation of fields: compression
- * method: (2 bytes)
- */
- EXPANDING_LEVEL_1(2),
-
- /**
- * Reduced with compression factor 2.
- *
- * @see Explanation of fields: compression
- * method: (2 bytes)
- */
- EXPANDING_LEVEL_2(3),
-
- /**
- * Reduced with compression factor 3.
- *
- * @see Explanation of fields: compression
- * method: (2 bytes)
- */
- EXPANDING_LEVEL_3(4),
-
- /**
- * Reduced with compression factor 4.
- *
- * @see Explanation of fields: compression
- * method: (2 bytes)
- */
- EXPANDING_LEVEL_4(5),
-
- /**
- * Imploding.
- *
- * @see Explanation of fields: compression
- * method: (2 bytes)
- */
- IMPLODING(6),
-
- /**
- * Tokenization.
- *
- * @see Explanation of fields: compression
- * method: (2 bytes)
- */
- TOKENIZATION(7),
-
- /**
- * Compression method 8 for compressed (deflated) entries.
- *
- * @see ZipEntry#DEFLATED
- */
- DEFLATED(ZipEntry.DEFLATED),
-
- /**
- * Compression Method 9 for enhanced deflate.
- *
- * @see https://www.winzip.com/wz54.htm
- */
- ENHANCED_DEFLATED(9),
-
- /**
- * PKWARE Data Compression Library Imploding.
- *
- * @see https://www.winzip.com/wz54.htm
- */
- PKWARE_IMPLODING(10),
-
- /**
- * Compression Method 12 for bzip2.
- *
- * @see https://www.winzip.com/wz54.htm
- */
- BZIP2(12),
-
- /**
- * Compression Method 14 for LZMA.
- *
- * @see https://www.7-zip.org/sdk.html
- * @see https://www.winzip.com/wz54.htm
- */
- LZMA(14),
-
-
- /**
- * Compression Method 95 for XZ.
- *
- * @see https://www.winzip.com/wz54.htm
- */
- XZ(95),
-
- /**
- * Compression Method 96 for Jpeg compression.
- *
- * @see https://www.winzip.com/wz54.htm
- */
- JPEG(96),
-
- /**
- * Compression Method 97 for WavPack.
- *
- * @see https://www.winzip.com/wz54.htm
- */
- WAVPACK(97),
-
- /**
- * Compression Method 98 for PPMd.
- *
- * @see https://www.winzip.com/wz54.htm
- */
- PPMD(98),
-
-
- /**
- * Compression Method 99 for AES encryption.
- *
- * @see https://www.winzip.com/wz54.htm
- */
- AES_ENCRYPTED(99),
-
- /**
- * Unknown compression method.
- */
- UNKNOWN();
-
- static final int UNKNOWN_CODE = -1;
-
- private final int code;
-
- private static final Map<Integer, ZipMethod> codeToEnum;
-
- static {
- final Map<Integer, ZipMethod> cte = new HashMap<>();
- for (final ZipMethod method : values()) {
- cte.put(method.getCode(), method);
- }
- codeToEnum = Collections.unmodifiableMap(cte);
- }
-
- private ZipMethod() {
- this(UNKNOWN_CODE);
- }
-
- /**
- * private constructor for enum style class.
- */
- ZipMethod(final int code) {
- this.code = code;
- }
-
- /**
- * the code of the compression method.
- *
- * @see ZipArchiveEntry#getMethod()
- *
- * @return an integer code for the method
- */
- public int getCode() {
- return code;
- }
-
-
- /**
- * returns the {@link ZipMethod} for the given code or null if the
- * method is not known.
- * @param code the code
- * @return the {@link ZipMethod} for the given code or null if the
- * method is not known.
- */
- public static ZipMethod getMethodByCode(final int code) {
- return codeToEnum.get(code);
- }
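Sketch: codes that have no constant yield null rather than UNKNOWN, which itself carries the artificial code -1:

    ZipMethod m = ZipMethod.getMethodByCode(8);        // DEFLATED
    ZipMethod unknown = ZipMethod.getMethodByCode(42); // null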
-}
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipShort.java b/src/org/apache/commons/compress/archivers/zip/ZipShort.java
deleted file mode 100644
index ccb50940a3d..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipShort.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.Serializable;
-
-import org.apache.commons.compress.utils.ByteUtils;
-
-/**
- * Utility class that represents a two byte integer with conversion
- * rules for the little endian byte order of ZIP files.
- * @Immutable
- */
-public final class ZipShort implements Cloneable, Serializable {
- /**
- * ZipShort with a value of 0.
- * @since 1.14
- */
- public static final ZipShort ZERO = new ZipShort(0);
-
- private static final long serialVersionUID = 1L;
-
- private final int value;
-
- /**
- * Create instance from a number.
- * @param value the int to store as a ZipShort
- */
- public ZipShort (final int value) {
- this.value = value;
- }
-
- /**
- * Create instance from bytes.
- * @param bytes the bytes to store as a ZipShort
- */
- public ZipShort (final byte[] bytes) {
- this(bytes, 0);
- }
-
- /**
- * Create instance from the two bytes starting at offset.
- * @param bytes the bytes to store as a ZipShort
- * @param offset the offset to start
- */
- public ZipShort (final byte[] bytes, final int offset) {
- value = ZipShort.getValue(bytes, offset);
- }
-
- /**
- * Get value as two bytes in little endian byte order.
- * @return the value as a two byte array in little endian byte order
- */
- public byte[] getBytes() {
- final byte[] result = new byte[2];
- ByteUtils.toLittleEndian(result, value, 0, 2);
- return result;
- }
-
- /**
- * Get value as Java int.
- * @return value as a Java int
- */
- public int getValue() {
- return value;
- }
-
- /**
- * Get value as two bytes in little endian byte order.
- * @param value the Java int to convert to bytes
- * @return the converted int as a byte array in little endian byte order
- */
- public static byte[] getBytes(final int value) {
- final byte[] result = new byte[2];
- putShort(value, result, 0);
- return result;
- }
-
- /**
- * Put the value as two bytes in little endian byte order.
- * @param value the Java int to convert to bytes
- * @param buf the output buffer
- * @param offset
- * The offset within the output buffer of the first byte to be written.
- * must be non-negative and no larger than buf.length-2
- */
- public static void putShort(final int value, final byte[] buf, final int offset) {
- ByteUtils.toLittleEndian(buf, value, offset, 2);
- }
-
- /**
- * Helper method to get the value as a java int from two bytes starting at given array offset
- * @param bytes the array of bytes
- * @param offset the offset to start
- * @return the corresponding java int value
- */
- public static int getValue(final byte[] bytes, final int offset) {
- return (int) ByteUtils.fromLittleEndian(bytes, offset, 2);
- }
-
- /**
- * Helper method to get the value as a java int from a two-byte array
- * @param bytes the array of bytes
- * @return the corresponding java int value
- */
- public static int getValue(final byte[] bytes) {
- return getValue(bytes, 0);
- }
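A sketch of the two byte round trip:

    byte[] b = ZipShort.getBytes(0x4B50); // {0x50, 0x4B}, the bytes "PK"
    int v = ZipShort.getValue(b);         // 0x4B50 again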
-
- /**
- * Override to make two instances with same value equal.
- * @param o an object to compare
- * @return true if the objects are equal
- */
- @Override
- public boolean equals(final Object o) {
- if (o == null || !(o instanceof ZipShort)) {
- return false;
- }
- return value == ((ZipShort) o).getValue();
- }
-
- /**
- * Override to make two instances with same value equal.
- * @return the value stored in the ZipShort
- */
- @Override
- public int hashCode() {
- return value;
- }
-
- @Override
- public Object clone() {
- try {
- return super.clone();
- } catch (final CloneNotSupportedException cnfe) {
- // impossible
- throw new RuntimeException(cnfe); //NOSONAR
- }
- }
-
- @Override
- public String toString() {
- return "ZipShort value: " + value;
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/zip/ZipUtil.java b/src/org/apache/commons/compress/archivers/zip/ZipUtil.java
deleted file mode 100644
index 8cc3e6a4c19..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/ZipUtil.java
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.archivers.zip;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.zip.CRC32;
-import java.util.zip.ZipEntry;
-
-/**
- * Utility class for handling DOS and Java time conversions.
- * @Immutable
- */
-public abstract class ZipUtil {
- /**
- * Smallest date/time ZIP can handle.
- */
- private static final byte[] DOS_TIME_MIN = ZipLong.getBytes(0x00002100L);
-
- /**
- * Convert a Date object to a DOS date/time field.
- * @param time the Date to convert
- * @return the date as a ZipLong
- */
- public static ZipLong toDosTime(final Date time) {
- return new ZipLong(toDosTime(time.getTime()));
- }
-
- /**
- * Convert a Date object to a DOS date/time field.
- *
- *
- * Stolen from InfoZip's fileio.c
- * @param t number of milliseconds since the epoch
- * @return the date as a byte array
- */
- public static byte[] toDosTime(final long t) {
- final byte[] result = new byte[4];
- toDosTime(t, result, 0);
- return result;
- }
-
- /**
- * Convert a Date object to a DOS date/time field.
- *
- *
- * Stolen from InfoZip's fileio.c
- * @param t number of milliseconds since the epoch
- * @param buf the output buffer
- * @param offset
- * The offset within the output buffer of the first byte to be written.
- * must be non-negative and no larger than buf.length-4
- */
- public static void toDosTime(final long t, final byte[] buf, final int offset) {
- toDosTime(Calendar.getInstance(), t, buf, offset);
- }
-
- static void toDosTime(final Calendar c, final long t, final byte[] buf, final int offset) {
- c.setTimeInMillis(t);
-
- final int year = c.get(Calendar.YEAR);
- if (year < 1980) {
- System.arraycopy(DOS_TIME_MIN, 0, buf, offset, DOS_TIME_MIN.length);// stop callers from changing the array
- return;
- }
- final int month = c.get(Calendar.MONTH) + 1;
- final long value = ((year - 1980) << 25)
- | (month << 21)
- | (c.get(Calendar.DAY_OF_MONTH) << 16)
- | (c.get(Calendar.HOUR_OF_DAY) << 11)
- | (c.get(Calendar.MINUTE) << 5)
- | (c.get(Calendar.SECOND) >> 1);
- ZipLong.putLong(value, buf, offset);
- }
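A sketch of the round trip; note the two second granularity (seconds are stored halved in the low five bits) and the 1980 floor applied above:

    long now = System.currentTimeMillis();
    byte[] dos = ZipUtil.toDosTime(now);               // 4 bytes, little endian
    Date back = ZipUtil.fromDosTime(new ZipLong(dos)); // matches now within 2 s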
-
-
- /**
- * Assumes a negative integer really is a positive integer that
- * has wrapped around and re-creates the original value.
- *
- * @param i the value to treat as unsigned int.
- * @return the unsigned int as a long.
- */
- public static long adjustToLong(final int i) {
- if (i < 0) {
- return 2 * ((long) Integer.MAX_VALUE) + 2 + i;
- }
- return i;
- }
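For example, adjustToLong(-2) evaluates to 2 * (2^31 - 1) + 2 - 2 = 2^32 - 2 = 4294967294, the unsigned reading of the same 32-bit pattern.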
-
- /**
- * Reverses a byte[] array. Reverses in-place (thus provided array is
- * mutated), but also returns same for convenience.
- *
- * @param array to reverse (mutated in-place, but also returned for
- * convenience).
- *
- * @return the reversed array (mutated in-place, but also returned for
- * convenience).
- * @since 1.5
- */
- public static byte[] reverse(final byte[] array) {
- final int z = array.length - 1; // position of last element
- for (int i = 0; i < array.length / 2; i++) {
- final byte x = array[i];
- array[i] = array[z - i];
- array[z - i] = x;
- }
- return array;
- }
-
- /**
- * Converts a BigInteger into a long, and blows up
- * (NumberFormatException) if the BigInteger is too big.
- *
- * @param big BigInteger to convert.
- * @return long representation of the BigInteger.
- */
- static long bigToLong(final BigInteger big) {
- if (big.bitLength() <= 63) { // bitLength() doesn't count the sign bit.
- return big.longValue();
- }
- throw new NumberFormatException("The BigInteger cannot fit inside a 64 bit java long: [" + big + "]");
- }
-
- /**
- *
- * Converts a long into a BigInteger. Negative numbers between -1 and
- * -2^31 are treated as unsigned 32 bit (e.g., positive) integers.
- * Negative numbers below -2^31 cause an IllegalArgumentException
- * to be thrown.
- *
- *
- * @param l long to convert to BigInteger.
- * @return BigInteger representation of the provided long.
- */
- static BigInteger longToBig(long l) {
- if (l < Integer.MIN_VALUE) {
- throw new IllegalArgumentException("Negative longs < -2^31 not permitted: [" + l + "]");
- } else if (l < 0 && l >= Integer.MIN_VALUE) {
- // If someone passes in a -2, they probably mean 4294967294
- // (For example, Unix UID/GID's are 32 bit unsigned.)
- l = ZipUtil.adjustToLong((int) l);
- }
- return BigInteger.valueOf(l);
- }
-
- /**
- * Converts a signed byte into an unsigned integer representation
- * (e.g., -1 becomes 255).
- *
- * @param b byte to convert to int
- * @return int representation of the provided byte
- * @since 1.5
- */
- public static int signedByteToUnsignedInt(final byte b) {
- if (b >= 0) {
- return b;
- }
- return 256 + b;
- }
-
- /**
- * Converts an unsigned integer to a signed byte (e.g., 255 becomes -1).
- *
- * @param i integer to convert to byte
- * @return byte representation of the provided int
- * @throws IllegalArgumentException if the provided integer is not inside the range [0,255].
- * @since 1.5
- */
- public static byte unsignedIntToSignedByte(final int i) {
- if (i > 255 || i < 0) {
- throw new IllegalArgumentException("Can only convert non-negative integers between [0,255] to byte: [" + i + "]");
- }
- if (i < 128) {
- return (byte) i;
- }
- return (byte) (i - 256);
- }
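A sketch of the two conversions side by side:

    int u = ZipUtil.signedByteToUnsignedInt((byte) -1); // 255
    byte s = ZipUtil.unsignedIntToSignedByte(255);      // -1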
-
- /**
- * Convert a DOS date/time field to a Date object.
- *
- * @param zipDosTime contains the stored DOS time.
- * @return a Date instance corresponding to the given time.
- */
- public static Date fromDosTime(final ZipLong zipDosTime) {
- final long dosTime = zipDosTime.getValue();
- return new Date(dosToJavaTime(dosTime));
- }
-
- /**
- * Converts DOS time to Java time (number of milliseconds since
- * epoch).
- * @param dosTime time to convert
- * @return converted time
- */
- public static long dosToJavaTime(final long dosTime) {
- final Calendar cal = Calendar.getInstance();
- // CheckStyle:MagicNumberCheck OFF - no point
- cal.set(Calendar.YEAR, (int) ((dosTime >> 25) & 0x7f) + 1980);
- cal.set(Calendar.MONTH, (int) ((dosTime >> 21) & 0x0f) - 1);
- cal.set(Calendar.DATE, (int) (dosTime >> 16) & 0x1f);
- cal.set(Calendar.HOUR_OF_DAY, (int) (dosTime >> 11) & 0x1f);
- cal.set(Calendar.MINUTE, (int) (dosTime >> 5) & 0x3f);
- cal.set(Calendar.SECOND, (int) (dosTime << 1) & 0x3e);
- cal.set(Calendar.MILLISECOND, 0);
- // CheckStyle:MagicNumberCheck ON
- return cal.getTime().getTime();
- }
-
- /**
- * If the entry has Unicode*ExtraFields and the CRCs of the
- * names/comments match those of the extra fields, transfer the
- * known Unicode values from the extra field.
- */
- static void setNameAndCommentFromExtraFields(final ZipArchiveEntry ze,
- final byte[] originalNameBytes,
- final byte[] commentBytes) {
- final UnicodePathExtraField name = (UnicodePathExtraField)
- ze.getExtraField(UnicodePathExtraField.UPATH_ID);
- final String newName = getUnicodeStringIfOriginalMatches(name,
- originalNameBytes);
- if (newName != null) {
- ze.setName(newName);
- ze.setNameSource(ZipArchiveEntry.NameSource.UNICODE_EXTRA_FIELD);
- }
-
- if (commentBytes != null && commentBytes.length > 0) {
- final UnicodeCommentExtraField cmt = (UnicodeCommentExtraField)
- ze.getExtraField(UnicodeCommentExtraField.UCOM_ID);
- final String newComment =
- getUnicodeStringIfOriginalMatches(cmt, commentBytes);
- if (newComment != null) {
- ze.setComment(newComment);
- ze.setCommentSource(ZipArchiveEntry.CommentSource.UNICODE_EXTRA_FIELD);
- }
- }
- }
-
- /**
- * If the stored CRC matches the one of the given name, return the
- * Unicode name of the given field.
- *
- *
- * If the field is null or the CRCs don't match, return null
- * instead.
- */
- private static
- String getUnicodeStringIfOriginalMatches(final AbstractUnicodeExtraField f,
- final byte[] orig) {
- if (f != null) {
- final CRC32 crc32 = new CRC32();
- crc32.update(orig);
- final long origCRC32 = crc32.getValue();
-
- if (origCRC32 == f.getNameCRC32()) {
- try {
- return ZipEncodingHelper
- .UTF8_ZIP_ENCODING.decode(f.getUnicodeName());
- } catch (final IOException ex) {
- // UTF-8 unsupported? Should be impossible; the
- // Unicode*ExtraField must contain some bad bytes
-
- // TODO log this anywhere?
- return null;
- }
- }
- }
- return null;
- }
-
- /**
- * Create a copy of the given array - or return null if the
- * argument is null.
- */
- static byte[] copy(final byte[] from) {
- if (from != null) {
- final byte[] to = new byte[from.length];
- System.arraycopy(from, 0, to, 0, to.length);
- return to;
- }
- return null;
- }
-
- static void copy(final byte[] from, final byte[] to, final int offset) {
- if (from != null) {
- System.arraycopy(from, 0, to, offset, from.length);
- }
- }
-
-
- /**
- * Whether this library is able to read or write the given entry.
- */
- static boolean canHandleEntryData(final ZipArchiveEntry entry) {
- return supportsEncryptionOf(entry) && supportsMethodOf(entry);
- }
-
- /**
- * Whether this library supports the encryption used by the given
- * entry.
- *
- * @return true if the entry isn't encrypted at all
- */
- private static boolean supportsEncryptionOf(final ZipArchiveEntry entry) {
- return !entry.getGeneralPurposeBit().usesEncryption();
- }
-
- /**
- * Whether this library supports the compression method used by
- * the given entry.
- *
- * @return true if the compression method is supported
- */
- private static boolean supportsMethodOf(final ZipArchiveEntry entry) {
- return entry.getMethod() == ZipEntry.STORED
- || entry.getMethod() == ZipMethod.UNSHRINKING.getCode()
- || entry.getMethod() == ZipMethod.IMPLODING.getCode()
- || entry.getMethod() == ZipEntry.DEFLATED
- || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode()
- || entry.getMethod() == ZipMethod.BZIP2.getCode();
- }
-
- /**
- * Checks whether the entry requires features not (yet) supported
- * by the library and throws an exception if it does.
- */
- static void checkRequestedFeatures(final ZipArchiveEntry ze)
- throws UnsupportedZipFeatureException {
- if (!supportsEncryptionOf(ze)) {
- throw
- new UnsupportedZipFeatureException(UnsupportedZipFeatureException
- .Feature.ENCRYPTION, ze);
- }
- if (!supportsMethodOf(ze)) {
- final ZipMethod m = ZipMethod.getMethodByCode(ze.getMethod());
- if (m == null) {
- throw
- new UnsupportedZipFeatureException(UnsupportedZipFeatureException
- .Feature.METHOD, ze);
- }
- throw new UnsupportedZipFeatureException(m, ze);
- }
- }
-}
diff --git a/src/org/apache/commons/compress/archivers/zip/package.html b/src/org/apache/commons/compress/archivers/zip/package.html
deleted file mode 100644
index 521687be66f..00000000000
--- a/src/org/apache/commons/compress/archivers/zip/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
- Provides stream classes for reading and writing archives using
- the ZIP format.
-
-
diff --git a/src/org/apache/commons/compress/changes/Change.java b/src/org/apache/commons/compress/changes/Change.java
deleted file mode 100644
index fb901bd41a0..00000000000
--- a/src/org/apache/commons/compress/changes/Change.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.changes;
-
-import java.io.InputStream;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * Change holds meta information about a change.
- *
- * @Immutable
- */
-class Change {
- private final String targetFile; // entry name to delete
- private final ArchiveEntry entry; // new entry to add
- private final InputStream input; // source for new entry
- private final boolean replaceMode; // whether the change should replace existing entries
-
- // Type of change
- private final int type;
- // Possible type values
- static final int TYPE_DELETE = 1;
- static final int TYPE_ADD = 2;
- static final int TYPE_MOVE = 3; // NOT USED
- static final int TYPE_DELETE_DIR = 4;
-
- /**
- * Constructor. Takes the filename of the file to be deleted
- * from the stream as argument.
- * @param pFilename the filename of the file to delete
- */
- Change(final String pFilename, final int type) {
- if(pFilename == null) {
- throw new NullPointerException();
- }
- this.targetFile = pFilename;
- this.type = type;
- this.input = null;
- this.entry = null;
- this.replaceMode = true;
- }
-
- /**
- * Construct a change which adds an entry.
- *
- * @param pEntry the entry details
- * @param pInput the InputStream for the entry data
- */
- Change(final ArchiveEntry pEntry, final InputStream pInput, final boolean replace) {
- if(pEntry == null || pInput == null) {
- throw new NullPointerException();
- }
- this.entry = pEntry;
- this.input = pInput;
- type = TYPE_ADD;
- targetFile = null;
- this.replaceMode = replace;
- }
-
- ArchiveEntry getEntry() {
- return entry;
- }
-
- InputStream getInput() {
- return input;
- }
-
- String targetFile() {
- return targetFile;
- }
-
- int type() {
- return type;
- }
-
- boolean isReplaceMode() {
- return replaceMode;
- }
-}
diff --git a/src/org/apache/commons/compress/changes/ChangeSet.java b/src/org/apache/commons/compress/changes/ChangeSet.java
deleted file mode 100644
index c0f8c61e4ba..00000000000
--- a/src/org/apache/commons/compress/changes/ChangeSet.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.changes;
-
-import java.io.InputStream;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.Set;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * ChangeSet collects and performs changes to an archive.
- * Putting delete changes in this ChangeSet from multiple threads can
- * cause conflicts.
- *
- * @NotThreadSafe
- */
-public final class ChangeSet {
-
- private final Set<Change> changes = new LinkedHashSet<>();
-
- /**
- * Deletes the file with the filename from the archive.
- *
- * @param filename
- * the filename of the file to delete
- */
- public void delete(final String filename) {
- addDeletion(new Change(filename, Change.TYPE_DELETE));
- }
-
- /**
- * Deletes the directory tree from the archive.
- *
- * @param dirName
- * the name of the directory tree to delete
- */
- public void deleteDir(final String dirName) {
- addDeletion(new Change(dirName, Change.TYPE_DELETE_DIR));
- }
-
- /**
- * Adds a new archive entry to the archive.
- *
- * @param pEntry
- * the entry to add
- * @param pInput
- * the datastream to add
- */
- public void add(final ArchiveEntry pEntry, final InputStream pInput) {
- this.add(pEntry, pInput, true);
- }
-
- /**
- * Adds a new archive entry to the archive.
- * If replace is set to true, this change will replace all other additions
- * done in this ChangeSet and all existing entries in the original stream.
- *
- * @param pEntry
- * the entry to add
- * @param pInput
- * the datastream to add
- * @param replace
- * indicates that this change should replace existing entries
- */
- public void add(final ArchiveEntry pEntry, final InputStream pInput, final boolean replace) {
- addAddition(new Change(pEntry, pInput, replace));
- }
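Sketch of typical use (the entry names and the newContent input stream are hypothetical):

    ChangeSet changes = new ChangeSet();
    changes.delete("old.txt");
    changes.add(new ZipArchiveEntry("config.xml"), newContent, true); // replace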
-
- /**
- * Adds an addition change.
- *
- * @param pChange
- * the change which should result in an addition
- */
- private void addAddition(final Change pChange) {
- if (Change.TYPE_ADD != pChange.type() ||
- pChange.getInput() == null) {
- return;
- }
-
- if (!changes.isEmpty()) {
- for (final Iterator<Change> it = changes.iterator(); it.hasNext();) {
- final Change change = it.next();
- if (change.type() == Change.TYPE_ADD
- && change.getEntry() != null) {
- final ArchiveEntry entry = change.getEntry();
-
- if(entry.equals(pChange.getEntry())) {
- if(pChange.isReplaceMode()) {
- it.remove();
- changes.add(pChange);
- return;
- }
- // do not add this change
- return;
- }
- }
- }
- }
- changes.add(pChange);
- }
-
- /**
- * Adds a delete change.
- *
- * @param pChange
- * the change which should result in a deletion
- */
- private void addDeletion(final Change pChange) {
- if ((Change.TYPE_DELETE != pChange.type() &&
- Change.TYPE_DELETE_DIR != pChange.type()) ||
- pChange.targetFile() == null) {
- return;
- }
- final String source = pChange.targetFile();
-
- if (source != null && !changes.isEmpty()) {
- for (final Iterator<Change> it = changes.iterator(); it.hasNext();) {
- final Change change = it.next();
- if (change.type() == Change.TYPE_ADD
- && change.getEntry() != null) {
- final String target = change.getEntry().getName();
-
- if (target == null) {
- continue;
- }
-
- if (Change.TYPE_DELETE == pChange.type() && source.equals(target) ||
- (Change.TYPE_DELETE_DIR == pChange.type() && target.matches(source + "/.*"))) {
- it.remove();
- }
- }
- }
- }
- changes.add(pChange);
- }
-
- /**
- * Returns the list of changes as a copy. Changes on this set
- * are not reflected on this ChangeSet and vice versa.
- * @return the changes as a copy
- */
- Set<Change> getChanges() {
- return new LinkedHashSet<>(changes);
- }
-}
diff --git a/src/org/apache/commons/compress/changes/ChangeSetPerformer.java b/src/org/apache/commons/compress/changes/ChangeSetPerformer.java
deleted file mode 100644
index bec6b642103..00000000000
--- a/src/org/apache/commons/compress/changes/ChangeSetPerformer.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.changes;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Enumeration;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.Set;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipFile;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * Performs ChangeSet operations on a stream.
- * This class is thread-safe and can be used multiple times.
- * It operates on a copy of the ChangeSet. If the ChangeSet changes,
- * a new Performer must be created.
- *
- * @ThreadSafe
- * @Immutable
- */
-public class ChangeSetPerformer {
- private final Set<Change> changes;
-
- /**
- * Constructs a ChangeSetPerformer with the changes from this ChangeSet
- * @param changeSet the ChangeSet which operations are used for performing
- */
- public ChangeSetPerformer(final ChangeSet changeSet) {
- changes = changeSet.getChanges();
- }
-
- /**
- * Performs all changes collected in this ChangeSet on the input stream and
- * streams the result to the output stream. Perform may be called more than once.
- *
- * This method finishes the stream; no other entries should be added
- * after that.
- *
- * @param in
- * the InputStream to perform the changes on
- * @param out
- * the resulting OutputStream with all modifications
- * @throws IOException
- * if a read/write error occurs
- * @return the results of this operation
- */
- public ChangeSetResults perform(final ArchiveInputStream in, final ArchiveOutputStream out)
- throws IOException {
- return perform(new ArchiveInputStreamIterator(in), out);
- }
-
- /**
- * Performs all changes collected in this ChangeSet on the ZipFile and
- * streams the result to the output stream. Perform may be called more than once.
- *
- * This method finishes the stream; no other entries should be added
- * after that.
- *
- * @param in
- * the ZipFile to perform the changes on
- * @param out
- * the resulting OutputStream with all modifications
- * @throws IOException
- * if a read/write error occurs
- * @return the results of this operation
- * @since 1.5
- */
- public ChangeSetResults perform(final ZipFile in, final ArchiveOutputStream out)
- throws IOException {
- return perform(new ZipFileIterator(in), out);
- }
-
- /**
- * Performs all changes collected in this ChangeSet on the input entries and
- * streams the result to the output stream.
- *
- * This method finishes the stream; no other entries should be added
- * after that.
- *
- * @param entryIterator
- * the entries to perform the changes on
- * @param out
- * the resulting OutputStream with all modifications
- * @throws IOException
- * if a read/write error occurs
- * @return the results of this operation
- */
- private ChangeSetResults perform(final ArchiveEntryIterator entryIterator,
- final ArchiveOutputStream out)
- throws IOException {
- final ChangeSetResults results = new ChangeSetResults();
-
- final Set<Change> workingSet = new LinkedHashSet<>(changes);
-
- for (final Iterator<Change> it = workingSet.iterator(); it.hasNext();) {
- final Change change = it.next();
-
- if (change.type() == Change.TYPE_ADD && change.isReplaceMode()) {
- copyStream(change.getInput(), out, change.getEntry());
- it.remove();
- results.addedFromChangeSet(change.getEntry().getName());
- }
- }
-
- while (entryIterator.hasNext()) {
- final ArchiveEntry entry = entryIterator.next();
- boolean copy = true;
-
- for (final Iterator<Change> it = workingSet.iterator(); it.hasNext();) {
- final Change change = it.next();
-
- final int type = change.type();
- final String name = entry.getName();
- if (type == Change.TYPE_DELETE && name != null) {
- if (name.equals(change.targetFile())) {
- copy = false;
- it.remove();
- results.deleted(name);
- break;
- }
- } else if (type == Change.TYPE_DELETE_DIR && name != null) {
- // don't combine ifs, to make future extensions easier
- if (name.startsWith(change.targetFile() + "/")) { // NOPMD
- copy = false;
- results.deleted(name);
- break;
- }
- }
- }
-
- if (copy
- && !isDeletedLater(workingSet, entry)
- && !results.hasBeenAdded(entry.getName())) {
- copyStream(entryIterator.getInputStream(), out, entry);
- results.addedFromStream(entry.getName());
- }
- }
-
- // Adds files which haven't been added from the original stream and do not have replace mode on
- for (final Iterator<Change> it = workingSet.iterator(); it.hasNext();) {
- final Change change = it.next();
-
- if (change.type() == Change.TYPE_ADD &&
- !change.isReplaceMode() &&
- !results.hasBeenAdded(change.getEntry().getName())) {
- copyStream(change.getInput(), out, change.getEntry());
- it.remove();
- results.addedFromChangeSet(change.getEntry().getName());
- }
- }
- out.finish();
- return results;
- }
-
- /**
- * Checks if an ArchiveEntry is deleted later in the ChangeSet. This is
- * necessary if a file is added with this ChangeSet, but is deleted
- * later in the same set.
- *
- * @param entry
- * the entry to check
- * @return true, if this entry has a deletion change later, false otherwise
- */
- private boolean isDeletedLater(final Set<Change> workingSet, final ArchiveEntry entry) {
- final String source = entry.getName();
-
- if (!workingSet.isEmpty()) {
- for (final Change change : workingSet) {
- final int type = change.type();
- final String target = change.targetFile();
- if (type == Change.TYPE_DELETE && source.equals(target)) {
- return true;
- }
-
- if (type == Change.TYPE_DELETE_DIR && source.startsWith(target + "/")){
- return true;
- }
- }
- }
- return false;
- }
-
- /**
- * Copies the ArchiveEntry to the Output stream
- *
- * @param in
- * the stream to read the data from
- * @param out
- * the stream to write the data to
- * @param entry
- * the entry to write
- * @throws IOException
- * if data cannot be read or written
- */
- private void copyStream(final InputStream in, final ArchiveOutputStream out,
- final ArchiveEntry entry) throws IOException {
- out.putArchiveEntry(entry);
- IOUtils.copy(in, out);
- out.closeArchiveEntry();
- }
-
- /**
- * Used in perform to abstract out getting entries and streams for
- * those entries.
- *
- * <p>Iterator#hasNext is not allowed to throw exceptions, which is
- * why we can't use Iterator<ArchiveEntry> directly -
- * otherwise we'd need to convert exceptions thrown in
- * ArchiveInputStream#getNextEntry.</p>
- */
- interface ArchiveEntryIterator {
- boolean hasNext() throws IOException;
- ArchiveEntry next();
- InputStream getInputStream() throws IOException;
- }
-
- private static class ArchiveInputStreamIterator
- implements ArchiveEntryIterator {
- private final ArchiveInputStream in;
- private ArchiveEntry next;
- ArchiveInputStreamIterator(final ArchiveInputStream in) {
- this.in = in;
- }
- @Override
- public boolean hasNext() throws IOException {
- return (next = in.getNextEntry()) != null;
- }
- @Override
- public ArchiveEntry next() {
- return next;
- }
- @Override
- public InputStream getInputStream() {
- return in;
- }
- }
-
- private static class ZipFileIterator
- implements ArchiveEntryIterator {
- private final ZipFile in;
- private final Enumeration<ZipArchiveEntry> nestedEnum;
- private ZipArchiveEntry current;
- ZipFileIterator(final ZipFile in) {
- this.in = in;
- nestedEnum = in.getEntriesInPhysicalOrder();
- }
- @Override
- public boolean hasNext() {
- return nestedEnum.hasMoreElements();
- }
- @Override
- public ArchiveEntry next() {
- current = nestedEnum.nextElement();
- return current;
- }
- @Override
- public InputStream getInputStream() throws IOException {
- return in.getInputStream(current);
- }
- }
-}
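A sketch of driving the performer over a ZIP archive (paths are placeholders). The ZipFile overload iterates entries in physical order, and perform() finishes the output stream itself:

```java
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
import org.apache.commons.compress.archivers.zip.ZipFile;
import org.apache.commons.compress.changes.ChangeSet;
import org.apache.commons.compress.changes.ChangeSetPerformer;
import org.apache.commons.compress.changes.ChangeSetResults;

public class PerformSketch {
    public static void main(String[] args) throws Exception {
        ChangeSet changes = new ChangeSet();
        changes.delete("README.txt");

        try (ZipFile source = new ZipFile("in.zip");
             OutputStream os = Files.newOutputStream(Paths.get("out.zip"));
             ZipArchiveOutputStream target = new ZipArchiveOutputStream(os)) {
            // perform() copies every surviving entry, applies the queued
            // changes and calls target.finish() before returning
            ChangeSetResults results = new ChangeSetPerformer(changes).perform(source, target);
            System.out.println("deleted: " + results.getDeleted());
        }
    }
}
```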
diff --git a/src/org/apache/commons/compress/changes/ChangeSetResults.java b/src/org/apache/commons/compress/changes/ChangeSetResults.java
deleted file mode 100644
index 788dccfaee4..00000000000
--- a/src/org/apache/commons/compress/changes/ChangeSetResults.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.changes;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Stores the results of a performed ChangeSet operation.
- */
-public class ChangeSetResults {
- private final List<String> addedFromChangeSet = new ArrayList<>();
- private final List<String> addedFromStream = new ArrayList<>();
- private final List<String> deleted = new ArrayList<>();
-
- /**
- * Adds the filename of a recently deleted file to the result list.
- * @param fileName the file which has been deleted
- */
- void deleted(final String fileName) {
- deleted.add(fileName);
- }
-
- /**
- * Adds to the result list the name of a file which has been
- * copied from the source stream to the target stream.
- * @param fileName the file name which has been added from the original stream
- */
- void addedFromStream(final String fileName) {
- addedFromStream.add(fileName);
- }
-
- /**
- * Adds to the result list the name of a file which has been
- * copied from the changeset to the target stream.
- * @param fileName the name of the file
- */
- void addedFromChangeSet(final String fileName) {
- addedFromChangeSet.add(fileName);
- }
-
- /**
- * Returns a list of filenames which have been added from the changeset
- * @return the list of filenames
- */
- public List<String> getAddedFromChangeSet() {
- return addedFromChangeSet;
- }
-
- /**
- * Returns a list of filenames which have been added from the original stream
- * @return the list of filenames
- */
- public List<String> getAddedFromStream() {
- return addedFromStream;
- }
-
- /**
- * Returns a list of filenames which have been deleted
- * @return the list of filenames
- */
- public List<String> getDeleted() {
- return deleted;
- }
-
- /**
- * Checks whether a filename has already been added to the result list
- * @param filename the filename to check
- * @return true, if this filename has already been added
- */
- boolean hasBeenAdded(final String filename) {
- return addedFromChangeSet.contains(filename) || addedFromStream.contains(filename);
- }
-}
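The three lists partition everything the performer wrote or dropped; a small sketch of reporting them, assuming a ChangeSetResults obtained from a perform() call as above:

```java
import org.apache.commons.compress.changes.ChangeSetResults;

final class ResultsReport {
    static void print(ChangeSetResults results) {
        // entries written from ChangeSet additions
        results.getAddedFromChangeSet().forEach(n -> System.out.println("added:   " + n));
        // entries copied through unchanged from the source archive
        results.getAddedFromStream().forEach(n -> System.out.println("kept:    " + n));
        // entries skipped because of delete()/deleteDir() changes
        results.getDeleted().forEach(n -> System.out.println("removed: " + n));
    }
}
```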
diff --git a/src/org/apache/commons/compress/changes/package.html b/src/org/apache/commons/compress/changes/package.html
deleted file mode 100644
index 4ba3e87d090..00000000000
--- a/src/org/apache/commons/compress/changes/package.html
+++ /dev/null
@@ -1,27 +0,0 @@
-<html>
-<body>
-
-<p>EXPERIMENTAL support for changesets that are applied to
-archives.</p>
-
-<p>This API is considered unstable and may be modified or even
-removed in future releases.</p>
-
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/compressors/CompressorException.java b/src/org/apache/commons/compress/compressors/CompressorException.java
deleted file mode 100644
index 9af3e69699c..00000000000
--- a/src/org/apache/commons/compress/compressors/CompressorException.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors;
-
-/**
- * Compressor related exception
- */
-public class CompressorException extends Exception {
-
- /** Serial */
- private static final long serialVersionUID = -2932901310255908814L;
-
- /**
- * Constructs a new exception with the specified detail message. The cause
- * is not initialized.
- *
- * @param message
- * the detail message
- */
- public CompressorException(final String message) {
- super(message);
- }
-
- /**
- * Constructs a new exception with the specified detail message and cause.
- *
- * @param message
- * the detail message
- * @param cause
- * the cause
- */
- public CompressorException(final String message, final Throwable cause) {
- super(message, cause);
- }
-}
diff --git a/src/org/apache/commons/compress/compressors/CompressorInputStream.java b/src/org/apache/commons/compress/compressors/CompressorInputStream.java
deleted file mode 100644
index 67de705a274..00000000000
--- a/src/org/apache/commons/compress/compressors/CompressorInputStream.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors;
-
-import java.io.InputStream;
-
-public abstract class CompressorInputStream extends InputStream {
- private long bytesRead = 0;
-
- /**
- * Increments the counter of already read bytes.
- * Doesn't increment if the EOF has been hit (read == -1)
- *
- * @param read the number of bytes read
- *
- * @since 1.1
- */
- protected void count(final int read) {
- count((long) read);
- }
-
- /**
- * Increments the counter of already read bytes.
- * Doesn't increment if the EOF has been hit (read == -1)
- *
- * @param read the number of bytes read
- */
- protected void count(final long read) {
- if (read != -1) {
- bytesRead = bytesRead + read;
- }
- }
-
- /**
- * Decrements the counter of already read bytes.
- *
- * @param pushedBack the number of bytes pushed back.
- * @since 1.7
- */
- protected void pushedBackBytes(final long pushedBack) {
- bytesRead -= pushedBack;
- }
-
- /**
- * Returns the current number of bytes read from this stream.
- * @return the number of read bytes
- * @deprecated this method may yield wrong results for large
- * archives, use #getBytesRead instead
- */
- @Deprecated
- public int getCount() {
- return (int) bytesRead;
- }
-
- /**
- * Returns the current number of bytes read from this stream.
- * @return the number of read bytes
- *
- * @since 1.1
- */
- public long getBytesRead() {
- return bytesRead;
- }
-
- /**
- * Returns the amount of raw or compressed bytes read by the stream.
- *
- * <p>This implementation invokes {@link #getBytesRead}.</p>
- *
- * <p>Provides half of {@link
- * org.apache.commons.compress.utils.InputStreamStatistics}
- * without forcing subclasses to implement the other half.</p>
- *
- * @return the amount of decompressed bytes returned by the stream
- * @since 1.17
- */
- public long getUncompressedCount() {
- return getBytesRead();
- }
-}
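The count()/pushedBackBytes() hooks are the whole contract a subclass has to honour for getBytesRead() to stay accurate. A hedged sketch of a trivial pass-through subclass (not a real library class) showing where the calls go:

```java
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.compress.compressors.CompressorInputStream;

/** Illustrative only: a no-op "decompressor" that just counts bytes. */
class CountingPassThroughInputStream extends CompressorInputStream {
    private final InputStream in;

    CountingPassThroughInputStream(InputStream in) {
        this.in = in;
    }

    @Override
    public int read() throws IOException {
        final int b = in.read();
        count(b == -1 ? 0 : 1); // EOF must not bump the counter
        return b;
    }

    @Override
    public int read(byte[] buf, int off, int len) throws IOException {
        final int n = in.read(buf, off, len);
        count(n); // count(long) already ignores read == -1
        return n;
    }

    @Override
    public void close() throws IOException {
        in.close();
    }
}
```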
diff --git a/src/org/apache/commons/compress/compressors/CompressorOutputStream.java b/src/org/apache/commons/compress/compressors/CompressorOutputStream.java
deleted file mode 100644
index 51eee9cee61..00000000000
--- a/src/org/apache/commons/compress/compressors/CompressorOutputStream.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors;
-
-import java.io.OutputStream;
-
-public abstract class CompressorOutputStream extends OutputStream {
- // TODO
-}
diff --git a/src/org/apache/commons/compress/compressors/CompressorStreamFactory.java b/src/org/apache/commons/compress/compressors/CompressorStreamFactory.java
deleted file mode 100644
index d730b9de41f..00000000000
--- a/src/org/apache/commons/compress/compressors/CompressorStreamFactory.java
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Locale;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.commons.compress.compressors.brotli.BrotliCompressorInputStream;
-import org.apache.commons.compress.compressors.brotli.BrotliUtils;
-import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
-import org.apache.commons.compress.compressors.deflate.DeflateCompressorInputStream;
-import org.apache.commons.compress.compressors.deflate.DeflateCompressorOutputStream;
-import org.apache.commons.compress.compressors.deflate64.Deflate64CompressorInputStream;
-import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
-import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
-import org.apache.commons.compress.compressors.lz4.BlockLZ4CompressorInputStream;
-import org.apache.commons.compress.compressors.lz4.BlockLZ4CompressorOutputStream;
-import org.apache.commons.compress.compressors.lz4.FramedLZ4CompressorInputStream;
-import org.apache.commons.compress.compressors.lz4.FramedLZ4CompressorOutputStream;
-import org.apache.commons.compress.compressors.lzma.LZMACompressorInputStream;
-import org.apache.commons.compress.compressors.lzma.LZMACompressorOutputStream;
-import org.apache.commons.compress.compressors.lzma.LZMAUtils;
-import org.apache.commons.compress.compressors.pack200.Pack200CompressorInputStream;
-import org.apache.commons.compress.compressors.pack200.Pack200CompressorOutputStream;
-import org.apache.commons.compress.compressors.snappy.FramedSnappyCompressorInputStream;
-import org.apache.commons.compress.compressors.snappy.FramedSnappyCompressorOutputStream;
-import org.apache.commons.compress.compressors.snappy.SnappyCompressorInputStream;
-import org.apache.commons.compress.compressors.xz.XZCompressorInputStream;
-import org.apache.commons.compress.compressors.xz.XZCompressorOutputStream;
-import org.apache.commons.compress.compressors.xz.XZUtils;
-import org.apache.commons.compress.compressors.z.ZCompressorInputStream;
-import org.apache.commons.compress.compressors.zstandard.ZstdCompressorInputStream;
-import org.apache.commons.compress.compressors.zstandard.ZstdCompressorOutputStream;
-import org.apache.commons.compress.compressors.zstandard.ZstdUtils;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.Lists;
-import org.apache.commons.compress.utils.ServiceLoaderIterator;
-import org.apache.commons.compress.utils.Sets;
-
-/**
- *
- * Factory to create Compressor[In|Out]putStreams from names. To add other
- * implementations you should extend CompressorStreamFactory and override the
- * appropriate methods (and call their implementation from super of course).
- *
- *
- * Example (Compressing a file):
- *
- * <pre>
- * final OutputStream out = Files.newOutputStream(output.toPath());
- * CompressorOutputStream cos = new CompressorStreamFactory()
- * .createCompressorOutputStream(CompressorStreamFactory.BZIP2, out);
- * IOUtils.copy(Files.newInputStream(input.toPath()), cos);
- * cos.close();
- * </pre>
- *
- * Example (Decompressing a file):
- *
- * <pre>
- * final InputStream is = Files.newInputStream(input.toPath());
- * CompressorInputStream in = new CompressorStreamFactory().createCompressorInputStream(CompressorStreamFactory.BZIP2,
- * is);
- * IOUtils.copy(in, Files.newOutputStream(output.toPath()));
- * in.close();
- * </pre>
- *
- * @Immutable provided that the deprecated method setDecompressConcatenated is
- * not used.
- * @ThreadSafe even if the deprecated method setDecompressConcatenated is used
- */
-public class CompressorStreamFactory implements CompressorStreamProvider {
-
- private static final CompressorStreamFactory SINGLETON = new CompressorStreamFactory();
-
-
-
- /**
- * Constant (value {@value}) used to identify the BROTLI compression
- * algorithm.
- *
- * @since 1.14
- */
- public static final String BROTLI = "br";
-
- /**
- * Constant (value {@value}) used to identify the BZIP2 compression
- * algorithm.
- *
- * @since 1.1
- */
- public static final String BZIP2 = "bzip2";
-
- /**
- * Constant (value {@value}) used to identify the GZIP compression
- * algorithm.
- *
- * @since 1.1
- */
- public static final String GZIP = "gz";
-
- /**
- * Constant (value {@value}) used to identify the PACK200 compression
- * algorithm.
- *
- * @since 1.3
- */
- public static final String PACK200 = "pack200";
-
- /**
- * Constant (value {@value}) used to identify the XZ compression method.
- *
- * @since 1.4
- */
- public static final String XZ = "xz";
-
- /**
- * Constant (value {@value}) used to identify the LZMA compression method.
- *
- * @since 1.6
- */
- public static final String LZMA = "lzma";
-
- /**
- * Constant (value {@value}) used to identify the "framed" Snappy
- * compression method.
- *
- * @since 1.7
- */
- public static final String SNAPPY_FRAMED = "snappy-framed";
-
- /**
- * Constant (value {@value}) used to identify the "raw" Snappy compression
- * method. Not supported as an output stream type.
- *
- * @since 1.7
- */
- public static final String SNAPPY_RAW = "snappy-raw";
-
- /**
- * Constant (value {@value}) used to identify the traditional Unix compress
- * method. Not supported as an output stream type.
- *
- * @since 1.7
- */
- public static final String Z = "z";
-
- /**
- * Constant (value {@value}) used to identify the Deflate compress method.
- *
- * @since 1.9
- */
- public static final String DEFLATE = "deflate";
-
- /**
- * Constant (value {@value}) used to identify the Deflate64 compress method.
- *
- * @since 1.16
- */
- public static final String DEFLATE64 = "deflate64";
-
- /**
- * Constant (value {@value}) used to identify the block LZ4
- * compression method.
- *
- * @since 1.14
- */
- public static final String LZ4_BLOCK = "lz4-block";
-
- /**
- * Constant (value {@value}) used to identify the frame LZ4
- * compression method.
- *
- * @since 1.14
- */
- public static final String LZ4_FRAMED = "lz4-framed";
-
- /**
- * Constant (value {@value}) used to identify the Zstandard compression
- * algorithm. Not supported as an output stream type.
- *
- * @since 1.16
- */
- public static final String ZSTANDARD = "zstd";
-
- private static final String YOU_NEED_BROTLI_DEC = youNeed("Google Brotli Dec", "https://github.com/google/brotli/");
- private static final String YOU_NEED_XZ_JAVA = youNeed("XZ for Java", "https://tukaani.org/xz/java.html");
- private static final String YOU_NEED_ZSTD_JNI = youNeed("Zstd JNI", "https://github.com/luben/zstd-jni");
-
- private static String youNeed(String name, String url) {
- return " In addition to Apache Commons Compress you need the " + name + " library - see " + url;
- }
-
- /**
- * Constructs a new sorted map from input stream provider names to provider
- * objects.
- *
- *
- * The map returned by this method will have one entry for each provider for
- * which support is available in the current Java virtual machine. If two or
- * more supported providers have the same name then the resulting map will
- * contain just one of them; which one it will contain is not specified.
- *
- *
- *
- * The invocation of this method, and the subsequent use of the resulting
- * map, may cause time-consuming disk or network I/O operations to occur.
- * This method is provided for applications that need to enumerate all of
- * the available providers, for example to allow user provider selection.
- *
- *
- *
- * This method may return different results at different times if new
- * providers are dynamically made available to the current Java virtual
- * machine.
- *
- *
- * @return an immutable map from names to provider objects
- * @since 1.13
- */
- public static SortedMap<String, CompressorStreamProvider> findAvailableCompressorInputStreamProviders() {
- return AccessController.doPrivileged(new PrivilegedAction<SortedMap<String, CompressorStreamProvider>>() {
- @Override
- public SortedMap<String, CompressorStreamProvider> run() {
- final TreeMap<String, CompressorStreamProvider> map = new TreeMap<>();
- putAll(SINGLETON.getInputStreamCompressorNames(), SINGLETON, map);
- for (final CompressorStreamProvider provider : findCompressorStreamProviders()) {
- putAll(provider.getInputStreamCompressorNames(), provider, map);
- }
- return map;
- }
- });
- }
-
- /**
- * Constructs a new sorted map from output stream provider names to provider
- * objects.
- *
- *
- * The map returned by this method will have one entry for each provider for
- * which support is available in the current Java virtual machine. If two or
- * more supported providers have the same name then the resulting map will
- * contain just one of them; which one it will contain is not specified.
- *
- *
- *
- * The invocation of this method, and the subsequent use of the resulting
- * map, may cause time-consuming disk or network I/O operations to occur.
- * This method is provided for applications that need to enumerate all of
- * the available providers, for example to allow user provider selection.
- *
- *
- *
- * This method may return different results at different times if new
- * providers are dynamically made available to the current Java virtual
- * machine.
- *
- *
- * @return an immutable map from names to provider objects
- * @since 1.13
- */
- public static SortedMap<String, CompressorStreamProvider> findAvailableCompressorOutputStreamProviders() {
- return AccessController.doPrivileged(new PrivilegedAction<SortedMap<String, CompressorStreamProvider>>() {
- @Override
- public SortedMap<String, CompressorStreamProvider> run() {
- final TreeMap<String, CompressorStreamProvider> map = new TreeMap<>();
- putAll(SINGLETON.getOutputStreamCompressorNames(), SINGLETON, map);
- for (final CompressorStreamProvider provider : findCompressorStreamProviders()) {
- putAll(provider.getOutputStreamCompressorNames(), provider, map);
- }
- return map;
- }
-
- });
- }
- private static ArrayList<CompressorStreamProvider> findCompressorStreamProviders() {
- return Lists.newArrayList(serviceLoaderIterator());
- }
-
- public static String getBrotli() {
- return BROTLI;
- }
-
- public static String getBzip2() {
- return BZIP2;
- }
-
- public static String getDeflate() {
- return DEFLATE;
- }
-
- /**
- * @since 1.16
- * @return the constant {@link #DEFLATE64}
- */
- public static String getDeflate64() {
- return DEFLATE64;
- }
-
- public static String getGzip() {
- return GZIP;
- }
-
- public static String getLzma() {
- return LZMA;
- }
-
- public static String getPack200() {
- return PACK200;
- }
-
- public static CompressorStreamFactory getSingleton() {
- return SINGLETON;
- }
-
- public static String getSnappyFramed() {
- return SNAPPY_FRAMED;
- }
-
- public static String getSnappyRaw() {
- return SNAPPY_RAW;
- }
-
- public static String getXz() {
- return XZ;
- }
-
- public static String getZ() {
- return Z;
- }
-
- public static String getLZ4Framed() {
- return LZ4_FRAMED;
- }
-
- public static String getLZ4Block() {
- return LZ4_BLOCK;
- }
-
- public static String getZstandard() {
- return ZSTANDARD;
- }
-
- static void putAll(final Set<String> names, final CompressorStreamProvider provider,
- final TreeMap<String, CompressorStreamProvider> map) {
- for (final String name : names) {
- map.put(toKey(name), provider);
- }
- }
-
- private static Iterator<CompressorStreamProvider> serviceLoaderIterator() {
- return new ServiceLoaderIterator<>(CompressorStreamProvider.class);
- }
-
- private static String toKey(final String name) {
- return name.toUpperCase(Locale.ROOT);
- }
-
- /**
- * If true, decompress until the end of the input. If false, stop after the
- * first stream and leave the input position to point to the next byte after
- * the stream
- */
- private final Boolean decompressUntilEOF;
- // This is Boolean so setDecompressConcatenated can determine whether it has
- // been set by the ctor
- // once the setDecompressConcatenated method has been removed, it can revert
- // to boolean
-
- private SortedMap<String, CompressorStreamProvider> compressorInputStreamProviders;
-
- private SortedMap<String, CompressorStreamProvider> compressorOutputStreamProviders;
-
- /**
- * If true, decompress until the end of the input. If false, stop after the
- * first stream and leave the input position to point to the next byte after
- * the stream
- */
- private volatile boolean decompressConcatenated = false;
-
- private final int memoryLimitInKb;
- /**
- * Creates an instance with the decompressConcatenated option set to false.
- */
- public CompressorStreamFactory() {
- this.decompressUntilEOF = null;
- this.memoryLimitInKb = -1;
- }
-
- /**
- * Creates an instance with the provided decompressConcatenated option.
- *
- * @param decompressUntilEOF
- * if true, decompress until the end of the input; if false, stop
- * after the first stream and leave the input position to point
- * to the next byte after the stream. This setting applies to the
- * gzip, bzip2 and xz formats only.
- *
- * @param memoryLimitInKb
- * Some streams require allocation of potentially significant
- * byte arrays/tables, and they can offer checks to prevent OOMs
- * on corrupt files. Set the maximum allowed memory allocation in KBs.
- *
- * @since 1.14
- */
- public CompressorStreamFactory(final boolean decompressUntilEOF, final int memoryLimitInKb) {
- this.decompressUntilEOF = decompressUntilEOF;
- // Also copy to existing variable so can continue to use that as the
- // current value
- this.decompressConcatenated = decompressUntilEOF;
- this.memoryLimitInKb = memoryLimitInKb;
- }
-
-
- /**
- * Creates an instance with the provided decompressConcatenated option.
- *
- * @param decompressUntilEOF
- * if true, decompress until the end of the input; if false, stop
- * after the first stream and leave the input position to point
- * to the next byte after the stream. This setting applies to the
- * gzip, bzip2 and xz formats only.
- * @since 1.10
- */
- public CompressorStreamFactory(final boolean decompressUntilEOF) {
- this(decompressUntilEOF, -1);
- }
-
- /**
- * Try to detect the type of compressor stream.
- *
- * @param in input stream
- * @return type of compressor stream detected
- * @throws CompressorException if no compressor stream type was detected
- * or if something else went wrong
- * @throws IllegalArgumentException if stream is null or does not support mark
- *
- * @since 1.14
- */
- public static String detect(final InputStream in) throws CompressorException {
- if (in == null) {
- throw new IllegalArgumentException("Stream must not be null.");
- }
-
- if (!in.markSupported()) {
- throw new IllegalArgumentException("Mark is not supported.");
- }
-
- final byte[] signature = new byte[12];
- in.mark(signature.length);
- int signatureLength = -1;
- try {
- signatureLength = IOUtils.readFully(in, signature);
- in.reset();
- } catch (IOException e) {
- throw new CompressorException("IOException while reading signature.", e);
- }
-
- if (BZip2CompressorInputStream.matches(signature, signatureLength)) {
- return BZIP2;
- }
-
- if (GzipCompressorInputStream.matches(signature, signatureLength)) {
- return GZIP;
- }
-
- if (Pack200CompressorInputStream.matches(signature, signatureLength)) {
- return PACK200;
- }
-
- if (FramedSnappyCompressorInputStream.matches(signature, signatureLength)) {
- return SNAPPY_FRAMED;
- }
-
- if (ZCompressorInputStream.matches(signature, signatureLength)) {
- return Z;
- }
-
- if (DeflateCompressorInputStream.matches(signature, signatureLength)) {
- return DEFLATE;
- }
-
- if (XZUtils.matches(signature, signatureLength)) {
- return XZ;
- }
-
- if (LZMAUtils.matches(signature, signatureLength)) {
- return LZMA;
- }
-
- if (FramedLZ4CompressorInputStream.matches(signature, signatureLength)) {
- return LZ4_FRAMED;
- }
-
- if (ZstdUtils.matches(signature, signatureLength)) {
- return ZSTANDARD;
- }
-
- throw new CompressorException("No Compressor found for the stream signature.");
- }
- /**
- * Creates a compressor input stream from an input stream, autodetecting the
- * compressor type from the first few bytes of the stream. The InputStream
- * must support marks, like BufferedInputStream.
- *
- * @param in
- * the input stream
- * @return the compressor input stream
- * @throws CompressorException
- * if the compressor name is not known
- * @throws IllegalArgumentException
- * if the stream is null or does not support mark
- * @since 1.1
- */
- public CompressorInputStream createCompressorInputStream(final InputStream in) throws CompressorException {
- return createCompressorInputStream(detect(in), in);
- }
-
- /**
- * Creates a compressor input stream from a compressor name and an input
- * stream.
- *
- * @param name
- * of the compressor, i.e. {@value #GZIP}, {@value #BZIP2},
- * {@value #XZ}, {@value #LZMA}, {@value #PACK200},
- * {@value #SNAPPY_RAW}, {@value #SNAPPY_FRAMED}, {@value #Z},
- * {@value #LZ4_BLOCK}, {@value #LZ4_FRAMED}, {@value #ZSTANDARD},
- * {@value #DEFLATE64}
- * or {@value #DEFLATE}
- * @param in
- * the input stream
- * @return compressor input stream
- * @throws CompressorException
- * if the compressor name is not known or not available,
- * or if there's an IOException or MemoryLimitException thrown
- * during initialization
- * @throws IllegalArgumentException
- * if the name or input stream is null
- */
- public CompressorInputStream createCompressorInputStream(final String name, final InputStream in)
- throws CompressorException {
- return createCompressorInputStream(name, in, decompressConcatenated);
- }
-
- @Override
- public CompressorInputStream createCompressorInputStream(final String name, final InputStream in,
- final boolean actualDecompressConcatenated) throws CompressorException {
- if (name == null || in == null) {
- throw new IllegalArgumentException("Compressor name and stream must not be null.");
- }
-
- try {
-
- if (GZIP.equalsIgnoreCase(name)) {
- return new GzipCompressorInputStream(in, actualDecompressConcatenated);
- }
-
- if (BZIP2.equalsIgnoreCase(name)) {
- return new BZip2CompressorInputStream(in, actualDecompressConcatenated);
- }
-
- if (BROTLI.equalsIgnoreCase(name)) {
- if (!BrotliUtils.isBrotliCompressionAvailable()) {
- throw new CompressorException("Brotli compression is not available." + YOU_NEED_BROTLI_DEC);
- }
- return new BrotliCompressorInputStream(in);
- }
-
- if (XZ.equalsIgnoreCase(name)) {
- if (!XZUtils.isXZCompressionAvailable()) {
- throw new CompressorException("XZ compression is not available." + YOU_NEED_XZ_JAVA);
- }
- return new XZCompressorInputStream(in, actualDecompressConcatenated, memoryLimitInKb);
- }
-
- if (ZSTANDARD.equalsIgnoreCase(name)) {
- if (!ZstdUtils.isZstdCompressionAvailable()) {
- throw new CompressorException("Zstandard compression is not available." + YOU_NEED_ZSTD_JNI);
- }
- return new ZstdCompressorInputStream(in);
- }
-
- if (LZMA.equalsIgnoreCase(name)) {
- if (!LZMAUtils.isLZMACompressionAvailable()) {
- throw new CompressorException("LZMA compression is not available" + YOU_NEED_XZ_JAVA);
- }
- return new LZMACompressorInputStream(in, memoryLimitInKb);
- }
-
- if (PACK200.equalsIgnoreCase(name)) {
- return new Pack200CompressorInputStream(in);
- }
-
- if (SNAPPY_RAW.equalsIgnoreCase(name)) {
- return new SnappyCompressorInputStream(in);
- }
-
- if (SNAPPY_FRAMED.equalsIgnoreCase(name)) {
- return new FramedSnappyCompressorInputStream(in);
- }
-
- if (Z.equalsIgnoreCase(name)) {
- return new ZCompressorInputStream(in, memoryLimitInKb);
- }
-
- if (DEFLATE.equalsIgnoreCase(name)) {
- return new DeflateCompressorInputStream(in);
- }
-
- if (DEFLATE64.equalsIgnoreCase(name)) {
- return new Deflate64CompressorInputStream(in);
- }
-
- if (LZ4_BLOCK.equalsIgnoreCase(name)) {
- return new BlockLZ4CompressorInputStream(in);
- }
-
- if (LZ4_FRAMED.equalsIgnoreCase(name)) {
- return new FramedLZ4CompressorInputStream(in, actualDecompressConcatenated);
- }
-
- } catch (final IOException e) {
- throw new CompressorException("Could not create CompressorInputStream.", e);
- }
- final CompressorStreamProvider compressorStreamProvider = getCompressorInputStreamProviders().get(toKey(name));
- if (compressorStreamProvider != null) {
- return compressorStreamProvider.createCompressorInputStream(name, in, actualDecompressConcatenated);
- }
-
- throw new CompressorException("Compressor: " + name + " not found.");
- }
-
- /**
- * Creates a compressor output stream from a compressor name and an output
- * stream.
- *
- * @param name
- * the compressor name, i.e. {@value #GZIP}, {@value #BZIP2},
- * {@value #XZ}, {@value #PACK200}, {@value #SNAPPY_FRAMED},
- * {@value #LZ4_BLOCK}, {@value #LZ4_FRAMED}, {@value #ZSTANDARD}
- * or {@value #DEFLATE}
- * @param out
- * the output stream
- * @return the compressor output stream
- * @throws CompressorException
- * if the archiver name is not known
- * @throws IllegalArgumentException
- * if the archiver name or stream is null
- */
- @Override
- public CompressorOutputStream createCompressorOutputStream(final String name, final OutputStream out)
- throws CompressorException {
- if (name == null || out == null) {
- throw new IllegalArgumentException("Compressor name and stream must not be null.");
- }
-
- try {
-
- if (GZIP.equalsIgnoreCase(name)) {
- return new GzipCompressorOutputStream(out);
- }
-
- if (BZIP2.equalsIgnoreCase(name)) {
- return new BZip2CompressorOutputStream(out);
- }
-
- if (XZ.equalsIgnoreCase(name)) {
- return new XZCompressorOutputStream(out);
- }
-
- if (PACK200.equalsIgnoreCase(name)) {
- return new Pack200CompressorOutputStream(out);
- }
-
- if (LZMA.equalsIgnoreCase(name)) {
- return new LZMACompressorOutputStream(out);
- }
-
- if (DEFLATE.equalsIgnoreCase(name)) {
- return new DeflateCompressorOutputStream(out);
- }
-
- if (SNAPPY_FRAMED.equalsIgnoreCase(name)) {
- return new FramedSnappyCompressorOutputStream(out);
- }
-
- if (LZ4_BLOCK.equalsIgnoreCase(name)) {
- return new BlockLZ4CompressorOutputStream(out);
- }
-
- if (LZ4_FRAMED.equalsIgnoreCase(name)) {
- return new FramedLZ4CompressorOutputStream(out);
- }
-
- if (ZSTANDARD.equalsIgnoreCase(name)) {
- return new ZstdCompressorOutputStream(out);
- }
- } catch (final IOException e) {
- throw new CompressorException("Could not create CompressorOutputStream", e);
- }
- final CompressorStreamProvider compressorStreamProvider = getCompressorOutputStreamProviders().get(toKey(name));
- if (compressorStreamProvider != null) {
- return compressorStreamProvider.createCompressorOutputStream(name, out);
- }
- throw new CompressorException("Compressor: " + name + " not found.");
- }
-
- public SortedMap<String, CompressorStreamProvider> getCompressorInputStreamProviders() {
- if (compressorInputStreamProviders == null) {
- compressorInputStreamProviders = Collections
- .unmodifiableSortedMap(findAvailableCompressorInputStreamProviders());
- }
- return compressorInputStreamProviders;
- }
-
- public SortedMap<String, CompressorStreamProvider> getCompressorOutputStreamProviders() {
- if (compressorOutputStreamProviders == null) {
- compressorOutputStreamProviders = Collections
- .unmodifiableSortedMap(findAvailableCompressorOutputStreamProviders());
- }
- return compressorOutputStreamProviders;
- }
-
- // For Unit tests
- boolean getDecompressConcatenated() {
- return decompressConcatenated;
- }
-
- public Boolean getDecompressUntilEOF() {
- return decompressUntilEOF;
- }
-
- @Override
- public Set<String> getInputStreamCompressorNames() {
- return Sets.newHashSet(GZIP, BROTLI, BZIP2, XZ, LZMA, PACK200, DEFLATE, SNAPPY_RAW, SNAPPY_FRAMED, Z, LZ4_BLOCK,
- LZ4_FRAMED, ZSTANDARD, DEFLATE64);
- }
-
- @Override
- public Set<String> getOutputStreamCompressorNames() {
- return Sets.newHashSet(GZIP, BZIP2, XZ, LZMA, PACK200, DEFLATE, SNAPPY_FRAMED, LZ4_BLOCK, LZ4_FRAMED, ZSTANDARD);
- }
-
- /**
- * Whether to decompress the full input or only the first stream in formats
- * supporting multiple concatenated input streams.
- *
- *
- * This setting applies to the gzip, bzip2 and xz formats only.
- *
- *
- * @param decompressConcatenated
- * if true, decompress until the end of the input; if false, stop
- * after the first stream and leave the input position to point
- * to the next byte after the stream
- * @since 1.5
- * @deprecated 1.10 use the {@link #CompressorStreamFactory(boolean)}
- * constructor instead
- * @throws IllegalStateException
- * if the constructor {@link #CompressorStreamFactory(boolean)}
- * was used to create the factory
- */
- @Deprecated
- public void setDecompressConcatenated(final boolean decompressConcatenated) {
- if (this.decompressUntilEOF != null) {
- throw new IllegalStateException("Cannot override the setting defined by the constructor");
- }
- this.decompressConcatenated = decompressConcatenated;
- }
-
-}
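A sketch of the signature-based autodetection path described in the factory above: detect() peeks at up to 12 bytes, so the input must support mark/reset, hence the BufferedInputStream wrapper (file names are placeholders):

```java
import java.io.BufferedInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.CompressorInputStream;
import org.apache.commons.compress.compressors.CompressorStreamFactory;
import org.apache.commons.compress.utils.IOUtils;

public class DetectSketch {
    public static void main(String[] args) throws Exception {
        try (InputStream raw = Files.newInputStream(Paths.get("data.unknown"));
             BufferedInputStream in = new BufferedInputStream(raw)) { // mark support
            System.out.println("detected: " + CompressorStreamFactory.detect(in));
            // the no-name overload runs the same detection internally
            try (CompressorInputStream cin =
                         new CompressorStreamFactory().createCompressorInputStream(in);
                 OutputStream out = Files.newOutputStream(Paths.get("data.out"))) {
                IOUtils.copy(cin, out);
            }
        }
    }
}
```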
diff --git a/src/org/apache/commons/compress/compressors/CompressorStreamProvider.java b/src/org/apache/commons/compress/compressors/CompressorStreamProvider.java
deleted file mode 100644
index b0c843123b8..00000000000
--- a/src/org/apache/commons/compress/compressors/CompressorStreamProvider.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors;
-
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Set;
-
-/**
- * Creates Compressor {@link CompressorInputStream}s and
- * {@link CompressorOutputStream}s.
- *
- * @since 1.13
- */
-public interface CompressorStreamProvider {
-
- /**
- * Creates a compressor input stream from a compressor name and an input
- * stream.
- *
- * @param name
- * of the compressor, i.e.
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#GZIP},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#BZIP2},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#XZ},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#LZMA},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#PACK200},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#SNAPPY_RAW},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#SNAPPY_FRAMED},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#Z}
- * or
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#DEFLATE}
- * @param in
- * the input stream
- * @param decompressUntilEOF
- * if true, decompress until the end of the input; if false, stop
- * after the first stream and leave the input position to point
- * to the next byte after the stream. This setting applies to the
- * gzip, bzip2 and xz formats only.
- * @return compressor input stream
- * @throws CompressorException
- * if the compressor name is not known
- * @throws IllegalArgumentException
- * if the name or input stream is null
- */
- CompressorInputStream createCompressorInputStream(final String name, final InputStream in,
- final boolean decompressUntilEOF) throws CompressorException;
-
- /**
- * Creates a compressor output stream from a compressor name and an output
- * stream.
- *
- * @param name
- * the compressor name, i.e.
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#GZIP},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#BZIP2},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#XZ},
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#PACK200}
- * or
- * {@value org.apache.commons.compress.compressors.CompressorStreamFactory#DEFLATE}
- * @param out
- * the output stream
- * @return the compressor output stream
- * @throws CompressorException
- * if the archiver name is not known
- * @throws IllegalArgumentException
- * if the archiver name or stream is null
- */
- CompressorOutputStream createCompressorOutputStream(final String name, final OutputStream out)
- throws CompressorException;
-
- /**
- * Gets all the input stream compressor names for this provider
- *
- * @return all the input compressor names for this provider
- */
- Set<String> getInputStreamCompressorNames();
-
- /**
- * Gets all the output stream compressor names for this provider
- *
- * @return all the output compressor names for this provider
- */
- Set<String> getOutputStreamCompressorNames();
-
-}
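Third-party codecs reach the factory through java.util.ServiceLoader; a skeleton of a provider (the "example" codec name and the class itself are hypothetical), which would be advertised in a META-INF/services/org.apache.commons.compress.compressors.CompressorStreamProvider file:

```java
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collections;
import java.util.Set;

import org.apache.commons.compress.compressors.CompressorException;
import org.apache.commons.compress.compressors.CompressorInputStream;
import org.apache.commons.compress.compressors.CompressorOutputStream;
import org.apache.commons.compress.compressors.CompressorStreamProvider;

public class ExampleCompressorStreamProvider implements CompressorStreamProvider {

    @Override
    public CompressorInputStream createCompressorInputStream(String name, InputStream in,
            boolean decompressUntilEOF) throws CompressorException {
        // a real provider would return its decoding stream here
        throw new CompressorException("example codec: decoding not implemented");
    }

    @Override
    public CompressorOutputStream createCompressorOutputStream(String name, OutputStream out)
            throws CompressorException {
        throw new CompressorException("example codec: encoding not implemented");
    }

    @Override
    public Set<String> getInputStreamCompressorNames() {
        return Collections.singleton("example");
    }

    @Override
    public Set<String> getOutputStreamCompressorNames() {
        return Collections.singleton("example");
    }
}
```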
diff --git a/src/org/apache/commons/compress/compressors/FileNameUtil.java b/src/org/apache/commons/compress/compressors/FileNameUtil.java
deleted file mode 100644
index f97bb8e4149..00000000000
--- a/src/org/apache/commons/compress/compressors/FileNameUtil.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-
-/**
- * File name mapping code for the compression formats.
- * @ThreadSafe
- * @since 1.4
- */
-public class FileNameUtil {
-
- /**
- * Map from common filename suffixes to the suffixes that identify compressed
- * versions of those file types. For example: from ".tar" to ".tgz".
- */
- private final Map<String, String> compressSuffix =
- new HashMap<>();
-
- /**
- * Map from common filename suffixes of compressed files to the
- * corresponding suffixes of uncompressed files. For example: from
- * ".tgz" to ".tar".
- *
- * This map also contains format-specific suffixes like ".gz" and "-z".
- * These suffixes are mapped to the empty string, as they should simply
- * be removed from the filename when the file is uncompressed.
- */
- private final Map<String, String> uncompressSuffix;
-
- /**
- * Length of the longest compressed suffix.
- */
- private final int longestCompressedSuffix;
-
- /**
- * Length of the shortest compressed suffix.
- */
- private final int shortestCompressedSuffix;
-
- /**
- * Length of the longest uncompressed suffix.
- */
- private final int longestUncompressedSuffix;
-
- /**
- * Length of the shortest uncompressed suffix longer than the
- * empty string.
- */
- private final int shortestUncompressedSuffix;
-
- /**
- * The format's default extension.
- */
- private final String defaultExtension;
-
- /**
- * Sets up the utility with a map of known compressed to
- * uncompressed suffix mappings and the default extension of the
- * format.
- *
- * @param uncompressSuffix Map from common filename suffixes of
- * compressed files to the corresponding suffixes of uncompressed
- * files. For example: from ".tgz" to ".tar". This map also
- * contains format-specific suffixes like ".gz" and "-z". These
- * suffixes are mapped to the empty string, as they should simply
- * be removed from the filename when the file is uncompressed.
- *
- * @param defaultExtension the format's default extension like ".gz"
- */
- public FileNameUtil(final Map<String, String> uncompressSuffix,
- final String defaultExtension) {
- this.uncompressSuffix = Collections.unmodifiableMap(uncompressSuffix);
- int lc = Integer.MIN_VALUE, sc = Integer.MAX_VALUE;
- int lu = Integer.MIN_VALUE, su = Integer.MAX_VALUE;
- for (final Map.Entry<String, String> ent : uncompressSuffix.entrySet()) {
- final int cl = ent.getKey().length();
- if (cl > lc) {
- lc = cl;
- }
- if (cl < sc) {
- sc = cl;
- }
-
- final String u = ent.getValue();
- final int ul = u.length();
- if (ul > 0) {
- if (!compressSuffix.containsKey(u)) {
- compressSuffix.put(u, ent.getKey());
- }
- if (ul > lu) {
- lu = ul;
- }
- if (ul < su) {
- su = ul;
- }
- }
- }
- longestCompressedSuffix = lc;
- longestUncompressedSuffix = lu;
- shortestCompressedSuffix = sc;
- shortestUncompressedSuffix = su;
- this.defaultExtension = defaultExtension;
- }
-
- /**
- * Detects common format suffixes in the given filename.
- *
- * @param filename name of a file
- * @return {@code true} if the filename has a common format suffix,
- * {@code false} otherwise
- */
- public boolean isCompressedFilename(final String filename) {
- final String lower = filename.toLowerCase(Locale.ENGLISH);
- final int n = lower.length();
- for (int i = shortestCompressedSuffix;
- i <= longestCompressedSuffix && i < n; i++) {
- if (uncompressSuffix.containsKey(lower.substring(n - i))) {
- return true;
- }
- }
- return false;
- }
-
- /**
- * Maps the given name of a compressed file to the name that the
- * file should have after uncompression. Commonly used file type specific
- * suffixes like ".tgz" or ".svgz" are automatically detected and
- * correctly mapped. For example the name "package.tgz" is mapped to
- * "package.tar". And any filenames with the generic ".gz" suffix
- * (or any other generic gzip suffix) is mapped to a name without that
- * suffix. If no format suffix is detected, then the filename is returned
- * unmapped.
- *
- * @param filename name of a file
- * @return name of the corresponding uncompressed file
- */
- public String getUncompressedFilename(final String filename) {
- final String lower = filename.toLowerCase(Locale.ENGLISH);
- final int n = lower.length();
- for (int i = shortestCompressedSuffix;
- i <= longestCompressedSuffix && i < n; i++) {
- final String suffix = uncompressSuffix.get(lower.substring(n - i));
- if (suffix != null) {
- return filename.substring(0, n - i) + suffix;
- }
- }
- return filename;
- }
-
- /**
- * Maps the given filename to the name that the file should have after
- * compression. Common file types with custom suffixes for
- * compressed versions are automatically detected and correctly mapped.
- * For example the name "package.tar" is mapped to "package.tgz". If no
- * custom mapping is applicable, then the default ".gz" suffix is appended
- * to the filename.
- *
- * @param filename name of a file
- * @return name of the corresponding compressed file
- */
- public String getCompressedFilename(final String filename) {
- final String lower = filename.toLowerCase(Locale.ENGLISH);
- final int n = lower.length();
- for (int i = shortestUncompressedSuffix;
- i <= longestUncompressedSuffix && i < n; i++) {
- final String suffix = compressSuffix.get(lower.substring(n - i));
- if (suffix != null) {
- return filename.substring(0, n - i) + suffix;
- }
- }
- // No custom suffix found, just append the default
- return filename + defaultExtension;
- }
-
-}
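A sketch of how a concrete format wires up FileNameUtil, loosely modeled on what GzipUtils does; the two-entry suffix table is a deliberately reduced, illustrative subset:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.commons.compress.compressors.FileNameUtil;

public class FileNameUtilSketch {
    public static void main(String[] args) {
        Map<String, String> uncompressSuffix = new HashMap<>();
        uncompressSuffix.put(".tgz", ".tar"); // type-specific mapping
        uncompressSuffix.put(".gz", "");      // generic suffix: just strip it
        FileNameUtil util = new FileNameUtil(uncompressSuffix, ".gz");

        System.out.println(util.isCompressedFilename("package.tgz"));    // true
        System.out.println(util.getUncompressedFilename("package.tgz")); // package.tar
        System.out.println(util.getCompressedFilename("package.tar"));   // package.tgz
        System.out.println(util.getCompressedFilename("notes.txt"));     // notes.txt.gz
    }
}
```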
diff --git a/src/org/apache/commons/compress/compressors/brotli/BrotliCompressorInputStream.java b/src/org/apache/commons/compress/compressors/brotli/BrotliCompressorInputStream.java
deleted file mode 100644
index 5674cb220c4..00000000000
--- a/src/org/apache/commons/compress/compressors/brotli/BrotliCompressorInputStream.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.compress.compressors.brotli;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-import org.brotli.dec.BrotliInputStream;
-
-/**
- * {@link CompressorInputStream} implementation to decode a Brotli encoded stream.
- * The implementation relies on the Google brotli decoder.
- *
- * @since 1.14
- */
-public class BrotliCompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
-
- private final CountingInputStream countingStream;
- private final BrotliInputStream decIS;
-
- public BrotliCompressorInputStream(final InputStream in) throws IOException {
- decIS = new BrotliInputStream(countingStream = new CountingInputStream(in));
- }
-
- @Override
- public int available() throws IOException {
- return decIS.available();
- }
-
- @Override
- public void close() throws IOException {
- decIS.close();
- }
-
- @Override
- public int read(final byte[] b) throws IOException {
- return decIS.read(b);
- }
-
- @Override
- public long skip(final long n) throws IOException {
- return IOUtils.skip(decIS, n);
- }
-
- @Override
- public void mark(final int readlimit) {
- decIS.mark(readlimit);
- }
-
- @Override
- public boolean markSupported() {
- return decIS.markSupported();
- }
-
- @Override
- public int read() throws IOException {
- final int ret = decIS.read();
- count(ret == -1 ? 0 : 1);
- return ret;
- }
-
- @Override
- public int read(final byte[] buf, final int off, final int len) throws IOException {
- final int ret = decIS.read(buf, off, len);
- count(ret);
- return ret;
- }
-
- @Override
- public String toString() {
- return decIS.toString();
- }
-
- @Override
- public void reset() throws IOException {
- decIS.reset();
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return countingStream.getBytesRead();
- }
-}
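The class deleted above is a thin wrapper around `org.brotli.dec.BrotliInputStream`. Typical usage, as a hedged sketch (assumes the Brotli decoder dependency is on the classpath; the file name is illustrative):

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.brotli.BrotliCompressorInputStream;

public class BrotliDecodeDemo {
    public static void main(String[] args) throws IOException {
        // All read/skip/mark calls delegate to the wrapped BrotliInputStream;
        // the wrapper only adds byte-count statistics.
        try (InputStream in = Files.newInputStream(Paths.get("data.br"));
             BrotliCompressorInputStream br = new BrotliCompressorInputStream(in)) {
            byte[] buf = new byte[8192];
            int n;
            while ((n = br.read(buf)) != -1) {
                // process n decompressed bytes ...
            }
        }
    }
}
```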
diff --git a/src/org/apache/commons/compress/compressors/brotli/BrotliUtils.java b/src/org/apache/commons/compress/compressors/brotli/BrotliUtils.java
deleted file mode 100644
index 15a21399d2e..00000000000
--- a/src/org/apache/commons/compress/compressors/brotli/BrotliUtils.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.brotli;
-
-/**
- * Utility code for the Brotli compression format.
- * @ThreadSafe
- * @since 1.14
- */
-public class BrotliUtils {
-
- enum CachedAvailability {
- DONT_CACHE, CACHED_AVAILABLE, CACHED_UNAVAILABLE
- }
-
- private static volatile CachedAvailability cachedBrotliAvailability;
-
- static {
- cachedBrotliAvailability = CachedAvailability.DONT_CACHE;
- try {
- Class.forName("org.osgi.framework.BundleEvent");
- } catch (final Exception ex) { // NOSONAR
- setCacheBrotliAvailablity(true);
- }
- }
-
- /** Private constructor to prevent instantiation of this utility class. */
- private BrotliUtils() {
- }
-
-
- /**
- * Are the classes required to support Brotli compression available?
- * @return true if the classes required to support Brotli compression are available
- */
- public static boolean isBrotliCompressionAvailable() {
- final CachedAvailability cachedResult = cachedBrotliAvailability;
- if (cachedResult != CachedAvailability.DONT_CACHE) {
- return cachedResult == CachedAvailability.CACHED_AVAILABLE;
- }
- return internalIsBrotliCompressionAvailable();
- }
-
- private static boolean internalIsBrotliCompressionAvailable() {
- try {
- Class.forName("org.brotli.dec.BrotliInputStream");
- return true;
- } catch (NoClassDefFoundError | Exception error) { // NOSONAR
- return false;
- }
- }
-
- /**
- * Whether to cache the result of the Brotli for Java check.
- *
- * <p>This defaults to {@code false} in an OSGi environment and {@code true} otherwise.</p>
- * @param doCache whether to cache the result
- */
- public static void setCacheBrotliAvailablity(final boolean doCache) {
- if (!doCache) {
- cachedBrotliAvailability = CachedAvailability.DONT_CACHE;
- } else if (cachedBrotliAvailability == CachedAvailability.DONT_CACHE) {
- final boolean hasBrotli = internalIsBrotliCompressionAvailable();
- cachedBrotliAvailability = hasBrotli ? CachedAvailability.CACHED_AVAILABLE
- : CachedAvailability.CACHED_UNAVAILABLE;
- }
- }
-
- // only exists to support unit tests
- static CachedAvailability getCachedBrotliAvailability() {
- return cachedBrotliAvailability;
- }
-}
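The `DONT_CACHE` / `CACHED_*` pattern in the deleted class generalizes to any optional dependency: probe once via reflection, optionally cache the answer. A minimal sketch of the same idea for an arbitrary probe class (hypothetical utility mirroring `BrotliUtils`, not an existing API; unlike the original, this sketch re-probes on every `setCaching(true)` call):

```java
// Sketch of the optional-dependency availability check used by BrotliUtils above.
final class OptionalDependency {
    private enum Availability { DONT_CACHE, AVAILABLE, UNAVAILABLE }

    private volatile Availability cached = Availability.DONT_CACHE;
    private final String probeClass;

    OptionalDependency(String probeClass) {
        this.probeClass = probeClass;
    }

    boolean isAvailable() {
        final Availability c = cached;
        if (c != Availability.DONT_CACHE) {
            return c == Availability.AVAILABLE;
        }
        return probe();
    }

    void setCaching(boolean doCache) {
        cached = !doCache ? Availability.DONT_CACHE
                : probe() ? Availability.AVAILABLE : Availability.UNAVAILABLE;
    }

    private boolean probe() {
        try {
            Class.forName(probeClass);
            return true;
        } catch (NoClassDefFoundError | Exception e) { // mirror BrotliUtils' broad catch
            return false;
        }
    }
}
```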
diff --git a/src/org/apache/commons/compress/compressors/brotli/package.html b/src/org/apache/commons/compress/compressors/brotli/package.html
deleted file mode 100644
index 7654cf67327..00000000000
--- a/src/org/apache/commons/compress/compressors/brotli/package.html
+++ /dev/null
@@ -1,26 +0,0 @@
-<html>
-<!-- ASF license header (Apache License, Version 2.0) -->
-<body>
-    <p>Provides stream class for decompressing streams using the
-    Brotli algorithm based on Google's Brotli decoder.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorInputStream.java b/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorInputStream.java
deleted file mode 100644
index 677bbbd1e93..00000000000
--- a/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorInputStream.java
+++ /dev/null
@@ -1,970 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * This package is based on the work done by Keiron Liddle, Aftex Software
- * to whom the Ant project is very grateful for his
- * great code.
- */
-package org.apache.commons.compress.compressors.bzip2;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteOrder;
-import java.util.Arrays;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.BitInputStream;
-import org.apache.commons.compress.utils.CloseShieldFilterInputStream;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * An input stream that decompresses from the BZip2 format to be read as any other stream.
- *
- * @NotThreadSafe
- */
-public class BZip2CompressorInputStream extends CompressorInputStream
- implements BZip2Constants, InputStreamStatistics {
-
- /**
- * Index of the last char in the block, so the block size == last + 1.
- */
- private int last;
-
- /**
- * Index in zptr[] of original string after sorting.
- */
- private int origPtr;
-
- /**
- * always: in the range 0 .. 9. The current block size is 100000 * this
- * number.
- */
- private int blockSize100k;
-
- private boolean blockRandomised;
-
- private final CRC crc = new CRC();
-
- private int nInUse;
-
- private BitInputStream bin;
- private final boolean decompressConcatenated;
-
- private static final int EOF = 0;
- private static final int START_BLOCK_STATE = 1;
- private static final int RAND_PART_A_STATE = 2;
- private static final int RAND_PART_B_STATE = 3;
- private static final int RAND_PART_C_STATE = 4;
- private static final int NO_RAND_PART_A_STATE = 5;
- private static final int NO_RAND_PART_B_STATE = 6;
- private static final int NO_RAND_PART_C_STATE = 7;
-
- private int currentState = START_BLOCK_STATE;
-
- private int storedBlockCRC, storedCombinedCRC;
- private int computedBlockCRC, computedCombinedCRC;
-
- // Variables used by setup* methods exclusively
-
- private int su_count;
- private int su_ch2;
- private int su_chPrev;
- private int su_i2;
- private int su_j2;
- private int su_rNToGo;
- private int su_rTPos;
- private int su_tPos;
- private char su_z;
-
- /**
- * All memory intensive stuff. This field is initialized by initBlock().
- */
- private BZip2CompressorInputStream.Data data;
-
- /**
- * Constructs a new BZip2CompressorInputStream which decompresses bytes
- * read from the specified stream. This doesn't support decompressing
- * concatenated .bz2 files.
- *
- * @param in the InputStream from which this object should be created
- * @throws IOException
- * if the stream content is malformed or an I/O error occurs.
- * @throws NullPointerException
- * if {@code in == null}
- */
- public BZip2CompressorInputStream(final InputStream in) throws IOException {
- this(in, false);
- }
-
- /**
- * Constructs a new BZip2CompressorInputStream which decompresses bytes
- * read from the specified stream.
- *
- * @param in the InputStream from which this object should be created
- * @param decompressConcatenated
- * if true, decompress until the end of the input;
- * if false, stop after the first .bz2 stream and
- * leave the input position to point to the next
- * byte after the .bz2 stream
- *
- * @throws IOException
- * if {@code in == null}, the stream content is malformed, or an I/O error occurs.
- */
- public BZip2CompressorInputStream(final InputStream in, final boolean decompressConcatenated) throws IOException {
- this.bin = new BitInputStream(in == System.in ? new CloseShieldFilterInputStream(in) : in,
- ByteOrder.BIG_ENDIAN);
- this.decompressConcatenated = decompressConcatenated;
-
- init(true);
- initBlock();
- }
-
- @Override
- public int read() throws IOException {
- if (this.bin != null) {
- final int r = read0();
- count(r < 0 ? -1 : 1);
- return r;
- }
- throw new IOException("stream closed");
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.io.InputStream#read(byte[], int, int)
- */
- @Override
- public int read(final byte[] dest, final int offs, final int len)
- throws IOException {
- if (offs < 0) {
- throw new IndexOutOfBoundsException("offs(" + offs + ") < 0.");
- }
- if (len < 0) {
- throw new IndexOutOfBoundsException("len(" + len + ") < 0.");
- }
- if (offs + len > dest.length) {
- throw new IndexOutOfBoundsException("offs(" + offs + ") + len("
- + len + ") > dest.length(" + dest.length + ").");
- }
- if (this.bin == null) {
- throw new IOException("stream closed");
- }
- if (len == 0) {
- return 0;
- }
-
- final int hi = offs + len;
- int destOffs = offs;
- int b;
- while (destOffs < hi && ((b = read0()) >= 0)) {
- dest[destOffs++] = (byte) b;
- count(1);
- }
-
- return (destOffs == offs) ? -1 : (destOffs - offs);
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return bin.getBytesRead();
- }
-
- private void makeMaps() {
- final boolean[] inUse = this.data.inUse;
- final byte[] seqToUnseq = this.data.seqToUnseq;
-
- int nInUseShadow = 0;
-
- for (int i = 0; i < 256; i++) {
- if (inUse[i]) {
- seqToUnseq[nInUseShadow++] = (byte) i;
- }
- }
-
- this.nInUse = nInUseShadow;
- }
-
- private int read0() throws IOException {
- switch (currentState) {
- case EOF:
- return -1;
-
- case START_BLOCK_STATE:
- return setupBlock();
-
- case RAND_PART_A_STATE:
- throw new IllegalStateException();
-
- case RAND_PART_B_STATE:
- return setupRandPartB();
-
- case RAND_PART_C_STATE:
- return setupRandPartC();
-
- case NO_RAND_PART_A_STATE:
- throw new IllegalStateException();
-
- case NO_RAND_PART_B_STATE:
- return setupNoRandPartB();
-
- case NO_RAND_PART_C_STATE:
- return setupNoRandPartC();
-
- default:
- throw new IllegalStateException();
- }
- }
-
- private int readNextByte(BitInputStream in) throws IOException {
- long b = in.readBits(8);
- return (int) b;
- }
-
- private boolean init(final boolean isFirstStream) throws IOException {
- if (null == bin) {
- throw new IOException("No InputStream");
- }
-
- if (!isFirstStream) {
- bin.clearBitCache();
- }
-
- final int magic0 = readNextByte(this.bin);
- if (magic0 == -1 && !isFirstStream) {
- return false;
- }
- final int magic1 = readNextByte(this.bin);
- final int magic2 = readNextByte(this.bin);
-
- if (magic0 != 'B' || magic1 != 'Z' || magic2 != 'h') {
- throw new IOException(isFirstStream
- ? "Stream is not in the BZip2 format"
- : "Garbage after a valid BZip2 stream");
- }
-
- final int blockSize = readNextByte(this.bin);
- if ((blockSize < '1') || (blockSize > '9')) {
- throw new IOException("BZip2 block size is invalid");
- }
-
- this.blockSize100k = blockSize - '0';
-
- this.computedCombinedCRC = 0;
-
- return true;
- }
-
- private void initBlock() throws IOException {
- BitInputStream bin = this.bin;
- char magic0;
- char magic1;
- char magic2;
- char magic3;
- char magic4;
- char magic5;
-
- while (true) {
- // Get the block magic bytes.
- magic0 = bsGetUByte(bin);
- magic1 = bsGetUByte(bin);
- magic2 = bsGetUByte(bin);
- magic3 = bsGetUByte(bin);
- magic4 = bsGetUByte(bin);
- magic5 = bsGetUByte(bin);
-
- // If it isn't the end-of-stream magic, break out of the loop.
- if (magic0 != 0x17 || magic1 != 0x72 || magic2 != 0x45
- || magic3 != 0x38 || magic4 != 0x50 || magic5 != 0x90) {
- break;
- }
-
- // End of stream was reached. Check the combined CRC and
- // advance to the next .bz2 stream if decoding concatenated
- // streams.
- if (complete()) {
- return;
- }
- }
-
- if (magic0 != 0x31 || // '1'
- magic1 != 0x41 || // 'A'
- magic2 != 0x59 || // 'Y'
- magic3 != 0x26 || // '&'
- magic4 != 0x53 || // 'S'
- magic5 != 0x59 // 'Y'
- ) {
- this.currentState = EOF;
- throw new IOException("bad block header");
- }
- this.storedBlockCRC = bsGetInt(bin);
- this.blockRandomised = bsR(bin, 1) == 1;
-
- /**
- * Allocate data here instead of in the constructor, so we do not
- * allocate it if the input file is empty.
- */
- if (this.data == null) {
- this.data = new Data(this.blockSize100k);
- }
-
- // currBlockNo++;
- getAndMoveToFrontDecode();
-
- this.crc.initialiseCRC();
- this.currentState = START_BLOCK_STATE;
- }
-
- private void endBlock() throws IOException {
- this.computedBlockCRC = this.crc.getFinalCRC();
-
- // A bad CRC is considered a fatal error.
- if (this.storedBlockCRC != this.computedBlockCRC) {
- // make next blocks readable without error
- // (repair feature, not yet documented, not tested)
- this.computedCombinedCRC = (this.storedCombinedCRC << 1)
- | (this.storedCombinedCRC >>> 31);
- this.computedCombinedCRC ^= this.storedBlockCRC;
-
- throw new IOException("BZip2 CRC error");
- }
-
- this.computedCombinedCRC = (this.computedCombinedCRC << 1)
- | (this.computedCombinedCRC >>> 31);
- this.computedCombinedCRC ^= this.computedBlockCRC;
- }
-
- private boolean complete() throws IOException {
- this.storedCombinedCRC = bsGetInt(bin);
- this.currentState = EOF;
- this.data = null;
-
- if (this.storedCombinedCRC != this.computedCombinedCRC) {
- throw new IOException("BZip2 CRC error");
- }
-
- // Look for the next .bz2 stream if decompressing
- // concatenated files.
- return !decompressConcatenated || !init(false);
- }
-
- @Override
- public void close() throws IOException {
- final BitInputStream inShadow = this.bin;
- if (inShadow != null) {
- try {
- inShadow.close();
- } finally {
- this.data = null;
- this.bin = null;
- }
- }
- }
-
- /**
- * Reads bits from the input stream.
- * @param n the number of bits to read, must not exceed 32
- * @return the requested bits combined into an int
- * @throws IOException on unexpected end of stream or I/O error
- */
- private static int bsR(BitInputStream bin, final int n) throws IOException {
- long thech = bin.readBits(n);
- if (thech < 0) {
- throw new IOException("unexpected end of stream");
- }
- return (int) thech;
- }
-
- private static boolean bsGetBit(BitInputStream bin) throws IOException {
- return bsR(bin, 1) != 0;
- }
-
- private static char bsGetUByte(BitInputStream bin) throws IOException {
- return (char) bsR(bin, 8);
- }
-
- private static int bsGetInt(BitInputStream bin) throws IOException {
- return bsR(bin, 32);
- }
-
- private static void checkBounds(final int checkVal, final int limitExclusive, String name)
- throws IOException {
- if (checkVal < 0) {
- throw new IOException("Corrupted input, " + name + " value negative");
- }
- if (checkVal >= limitExclusive) {
- throw new IOException("Corrupted input, " + name + " value too big");
- }
- }
-
- /**
- * Called by createHuffmanDecodingTables() exclusively.
- */
- private static void hbCreateDecodeTables(final int[] limit,
- final int[] base, final int[] perm, final char[] length,
- final int minLen, final int maxLen, final int alphaSize)
- throws IOException {
- for (int i = minLen, pp = 0; i <= maxLen; i++) {
- for (int j = 0; j < alphaSize; j++) {
- if (length[j] == i) {
- perm[pp++] = j;
- }
- }
- }
-
- for (int i = MAX_CODE_LEN; --i > 0;) {
- base[i] = 0;
- limit[i] = 0;
- }
-
- for (int i = 0; i < alphaSize; i++) {
- final int l = length[i];
- checkBounds(l, MAX_ALPHA_SIZE, "length");
- base[l + 1]++;
- }
-
- for (int i = 1, b = base[0]; i < MAX_CODE_LEN; i++) {
- b += base[i];
- base[i] = b;
- }
-
- for (int i = minLen, vec = 0, b = base[i]; i <= maxLen; i++) {
- final int nb = base[i + 1];
- vec += nb - b;
- b = nb;
- limit[i] = vec - 1;
- vec <<= 1;
- }
-
- for (int i = minLen + 1; i <= maxLen; i++) {
- base[i] = ((limit[i - 1] + 1) << 1) - base[i];
- }
- }
-
- private void recvDecodingTables() throws IOException {
- final BitInputStream bin = this.bin;
- final Data dataShadow = this.data;
- final boolean[] inUse = dataShadow.inUse;
- final byte[] pos = dataShadow.recvDecodingTables_pos;
- final byte[] selector = dataShadow.selector;
- final byte[] selectorMtf = dataShadow.selectorMtf;
-
- int inUse16 = 0;
-
- /* Receive the mapping table */
- for (int i = 0; i < 16; i++) {
- if (bsGetBit(bin)) {
- inUse16 |= 1 << i;
- }
- }
-
- Arrays.fill(inUse, false);
- for (int i = 0; i < 16; i++) {
- if ((inUse16 & (1 << i)) != 0) {
- final int i16 = i << 4;
- for (int j = 0; j < 16; j++) {
- if (bsGetBit(bin)) {
- inUse[i16 + j] = true;
- }
- }
- }
- }
-
- makeMaps();
- final int alphaSize = this.nInUse + 2;
- /* Now the selectors */
- final int nGroups = bsR(bin, 3);
- final int nSelectors = bsR(bin, 15);
- checkBounds(alphaSize, MAX_ALPHA_SIZE + 1, "alphaSize");
- checkBounds(nGroups, N_GROUPS + 1, "nGroups");
- checkBounds(nSelectors, MAX_SELECTORS + 1, "nSelectors");
-
- for (int i = 0; i < nSelectors; i++) {
- int j = 0;
- while (bsGetBit(bin)) {
- j++;
- }
- selectorMtf[i] = (byte) j;
- }
-
- /* Undo the MTF values for the selectors. */
- for (int v = nGroups; --v >= 0;) {
- pos[v] = (byte) v;
- }
-
- for (int i = 0; i < nSelectors; i++) {
- int v = selectorMtf[i] & 0xff;
- checkBounds(v, N_GROUPS, "selectorMtf");
- final byte tmp = pos[v];
- while (v > 0) {
- // nearly all times v is zero, 4 in most other cases
- pos[v] = pos[v - 1];
- v--;
- }
- pos[0] = tmp;
- selector[i] = tmp;
- }
-
- final char[][] len = dataShadow.temp_charArray2d;
-
- /* Now the coding tables */
- for (int t = 0; t < nGroups; t++) {
- int curr = bsR(bin, 5);
- final char[] len_t = len[t];
- for (int i = 0; i < alphaSize; i++) {
- while (bsGetBit(bin)) {
- curr += bsGetBit(bin) ? -1 : 1;
- }
- len_t[i] = (char) curr;
- }
- }
-
- // finally create the Huffman tables
- createHuffmanDecodingTables(alphaSize, nGroups);
- }
-
- /**
- * Called by recvDecodingTables() exclusively.
- */
- private void createHuffmanDecodingTables(final int alphaSize,
- final int nGroups) throws IOException {
- final Data dataShadow = this.data;
- final char[][] len = dataShadow.temp_charArray2d;
- final int[] minLens = dataShadow.minLens;
- final int[][] limit = dataShadow.limit;
- final int[][] base = dataShadow.base;
- final int[][] perm = dataShadow.perm;
-
- for (int t = 0; t < nGroups; t++) {
- int minLen = 32;
- int maxLen = 0;
- final char[] len_t = len[t];
- for (int i = alphaSize; --i >= 0;) {
- final char lent = len_t[i];
- if (lent > maxLen) {
- maxLen = lent;
- }
- if (lent < minLen) {
- minLen = lent;
- }
- }
- hbCreateDecodeTables(limit[t], base[t], perm[t], len[t], minLen,
- maxLen, alphaSize);
- minLens[t] = minLen;
- }
- }
-
- private void getAndMoveToFrontDecode() throws IOException {
- final BitInputStream bin = this.bin;
- this.origPtr = bsR(bin, 24);
- recvDecodingTables();
-
- final Data dataShadow = this.data;
- final byte[] ll8 = dataShadow.ll8;
- final int[] unzftab = dataShadow.unzftab;
- final byte[] selector = dataShadow.selector;
- final byte[] seqToUnseq = dataShadow.seqToUnseq;
- final char[] yy = dataShadow.getAndMoveToFrontDecode_yy;
- final int[] minLens = dataShadow.minLens;
- final int[][] limit = dataShadow.limit;
- final int[][] base = dataShadow.base;
- final int[][] perm = dataShadow.perm;
- final int limitLast = this.blockSize100k * 100000;
-
- /*
- * Setting up the unzftab entries here is not strictly necessary, but it
- * does save having to do it later in a separate pass, and so saves a
- * block's worth of cache misses.
- */
- for (int i = 256; --i >= 0;) {
- yy[i] = (char) i;
- unzftab[i] = 0;
- }
-
- int groupNo = 0;
- int groupPos = G_SIZE - 1;
- final int eob = this.nInUse + 1;
- int nextSym = getAndMoveToFrontDecode0();
- int lastShadow = -1;
- int zt = selector[groupNo] & 0xff;
- checkBounds(zt, N_GROUPS, "zt");
- int[] base_zt = base[zt];
- int[] limit_zt = limit[zt];
- int[] perm_zt = perm[zt];
- int minLens_zt = minLens[zt];
-
- while (nextSym != eob) {
- if ((nextSym == RUNA) || (nextSym == RUNB)) {
- int s = -1;
-
- for (int n = 1; true; n <<= 1) {
- if (nextSym == RUNA) {
- s += n;
- } else if (nextSym == RUNB) {
- s += n << 1;
- } else {
- break;
- }
-
- if (groupPos == 0) {
- groupPos = G_SIZE - 1;
- checkBounds(++groupNo, MAX_SELECTORS, "groupNo");
- zt = selector[groupNo] & 0xff;
- checkBounds(zt, N_GROUPS, "zt");
- base_zt = base[zt];
- limit_zt = limit[zt];
- perm_zt = perm[zt];
- minLens_zt = minLens[zt];
- } else {
- groupPos--;
- }
-
- int zn = minLens_zt;
- checkBounds(zn, MAX_ALPHA_SIZE, "zn");
- int zvec = bsR(bin, zn);
- while (zvec > limit_zt[zn]) {
- checkBounds(++zn, MAX_ALPHA_SIZE, "zn");
- zvec = (zvec << 1) | bsR(bin, 1);
- }
- final int tmp = zvec - base_zt[zn];
- checkBounds(tmp, MAX_ALPHA_SIZE, "zvec");
- nextSym = perm_zt[tmp];
- }
-
- final int yy0 = yy[0];
- checkBounds(yy0, 256, "yy");
- final byte ch = seqToUnseq[yy0];
- unzftab[ch & 0xff] += s + 1;
-
- final int from = ++lastShadow;
- lastShadow += s;
- Arrays.fill(ll8, from, lastShadow + 1, ch);
-
- if (lastShadow >= limitLast) {
- throw new IOException("block overrun while expanding RLE in MTF, "
- + lastShadow + " exceeds " + limitLast);
- }
- } else {
- if (++lastShadow >= limitLast) {
- throw new IOException("block overrun in MTF, "
- + lastShadow + " exceeds " + limitLast);
- }
- checkBounds(nextSym, 256 + 1, "nextSym");
-
- final char tmp = yy[nextSym - 1];
- checkBounds(tmp, 256, "yy");
- unzftab[seqToUnseq[tmp] & 0xff]++;
- ll8[lastShadow] = seqToUnseq[tmp];
-
- /*
- * This loop is hammered during decompression, hence avoid
- * native method call overhead of System.arraycopy for very
- * small ranges to copy.
- */
- if (nextSym <= 16) {
- for (int j = nextSym - 1; j > 0;) {
- yy[j] = yy[--j];
- }
- } else {
- System.arraycopy(yy, 0, yy, 1, nextSym - 1);
- }
-
- yy[0] = tmp;
-
- if (groupPos == 0) {
- groupPos = G_SIZE - 1;
- checkBounds(++groupNo, MAX_SELECTORS, "groupNo");
- zt = selector[groupNo] & 0xff;
- checkBounds(zt, N_GROUPS, "zt");
- base_zt = base[zt];
- limit_zt = limit[zt];
- perm_zt = perm[zt];
- minLens_zt = minLens[zt];
- } else {
- groupPos--;
- }
-
- int zn = minLens_zt;
- checkBounds(zn, MAX_ALPHA_SIZE, "zn");
- int zvec = bsR(bin, zn);
- while (zvec > limit_zt[zn]) {
- checkBounds(++zn, MAX_ALPHA_SIZE, "zn");
- zvec = (zvec << 1) | bsR(bin, 1);
- }
- final int idx = zvec - base_zt[zn];
- checkBounds(idx, MAX_ALPHA_SIZE, "zvec");
- nextSym = perm_zt[idx];
- }
- }
-
- this.last = lastShadow;
- }
-
- private int getAndMoveToFrontDecode0() throws IOException {
- final Data dataShadow = this.data;
- final int zt = dataShadow.selector[0] & 0xff;
- checkBounds(zt, N_GROUPS, "zt");
- final int[] limit_zt = dataShadow.limit[zt];
- int zn = dataShadow.minLens[zt];
- checkBounds(zn, MAX_ALPHA_SIZE, "zn");
- int zvec = bsR(bin, zn);
- while (zvec > limit_zt[zn]) {
- checkBounds(++zn, MAX_ALPHA_SIZE, "zn");
- zvec = (zvec << 1) | bsR(bin, 1);
- }
- final int tmp = zvec - dataShadow.base[zt][zn];
- checkBounds(tmp, MAX_ALPHA_SIZE, "zvec");
-
- return dataShadow.perm[zt][tmp];
- }
-
- private int setupBlock() throws IOException {
- if (currentState == EOF || this.data == null) {
- return -1;
- }
-
- final int[] cftab = this.data.cftab;
- final int ttLen = this.last + 1;
- final int[] tt = this.data.initTT(ttLen);
- final byte[] ll8 = this.data.ll8;
- cftab[0] = 0;
- System.arraycopy(this.data.unzftab, 0, cftab, 1, 256);
-
- for (int i = 1, c = cftab[0]; i <= 256; i++) {
- c += cftab[i];
- cftab[i] = c;
- }
-
- for (int i = 0, lastShadow = this.last; i <= lastShadow; i++) {
- final int tmp = cftab[ll8[i] & 0xff]++;
- checkBounds(tmp, ttLen, "tt index");
- tt[tmp] = i;
- }
-
- if ((this.origPtr < 0) || (this.origPtr >= tt.length)) {
- throw new IOException("stream corrupted");
- }
-
- this.su_tPos = tt[this.origPtr];
- this.su_count = 0;
- this.su_i2 = 0;
- this.su_ch2 = 256; /* not a char and not EOF */
-
- if (this.blockRandomised) {
- this.su_rNToGo = 0;
- this.su_rTPos = 0;
- return setupRandPartA();
- }
- return setupNoRandPartA();
- }
-
- private int setupRandPartA() throws IOException {
- if (this.su_i2 <= this.last) {
- this.su_chPrev = this.su_ch2;
- int su_ch2Shadow = this.data.ll8[this.su_tPos] & 0xff;
- checkBounds(this.su_tPos, this.data.tt.length, "su_tPos");
- this.su_tPos = this.data.tt[this.su_tPos];
- if (this.su_rNToGo == 0) {
- this.su_rNToGo = Rand.rNums(this.su_rTPos) - 1;
- if (++this.su_rTPos == 512) {
- this.su_rTPos = 0;
- }
- } else {
- this.su_rNToGo--;
- }
- this.su_ch2 = su_ch2Shadow ^= (this.su_rNToGo == 1) ? 1 : 0;
- this.su_i2++;
- this.currentState = RAND_PART_B_STATE;
- this.crc.updateCRC(su_ch2Shadow);
- return su_ch2Shadow;
- }
- endBlock();
- initBlock();
- return setupBlock();
- }
-
- private int setupNoRandPartA() throws IOException {
- if (this.su_i2 <= this.last) {
- this.su_chPrev = this.su_ch2;
- final int su_ch2Shadow = this.data.ll8[this.su_tPos] & 0xff;
- this.su_ch2 = su_ch2Shadow;
- checkBounds(this.su_tPos, this.data.tt.length, "su_tPos");
- this.su_tPos = this.data.tt[this.su_tPos];
- this.su_i2++;
- this.currentState = NO_RAND_PART_B_STATE;
- this.crc.updateCRC(su_ch2Shadow);
- return su_ch2Shadow;
- }
- this.currentState = NO_RAND_PART_A_STATE;
- endBlock();
- initBlock();
- return setupBlock();
- }
-
- private int setupRandPartB() throws IOException {
- if (this.su_ch2 != this.su_chPrev) {
- this.currentState = RAND_PART_A_STATE;
- this.su_count = 1;
- return setupRandPartA();
- } else if (++this.su_count >= 4) {
- this.su_z = (char) (this.data.ll8[this.su_tPos] & 0xff);
- checkBounds(this.su_tPos, this.data.tt.length, "su_tPos");
- this.su_tPos = this.data.tt[this.su_tPos];
- if (this.su_rNToGo == 0) {
- this.su_rNToGo = Rand.rNums(this.su_rTPos) - 1;
- if (++this.su_rTPos == 512) {
- this.su_rTPos = 0;
- }
- } else {
- this.su_rNToGo--;
- }
- this.su_j2 = 0;
- this.currentState = RAND_PART_C_STATE;
- if (this.su_rNToGo == 1) {
- this.su_z ^= 1;
- }
- return setupRandPartC();
- } else {
- this.currentState = RAND_PART_A_STATE;
- return setupRandPartA();
- }
- }
-
- private int setupRandPartC() throws IOException {
- if (this.su_j2 < this.su_z) {
- this.crc.updateCRC(this.su_ch2);
- this.su_j2++;
- return this.su_ch2;
- }
- this.currentState = RAND_PART_A_STATE;
- this.su_i2++;
- this.su_count = 0;
- return setupRandPartA();
- }
-
- private int setupNoRandPartB() throws IOException {
- if (this.su_ch2 != this.su_chPrev) {
- this.su_count = 1;
- return setupNoRandPartA();
- } else if (++this.su_count >= 4) {
- checkBounds(this.su_tPos, this.data.ll8.length, "su_tPos");
- this.su_z = (char) (this.data.ll8[this.su_tPos] & 0xff);
- this.su_tPos = this.data.tt[this.su_tPos];
- this.su_j2 = 0;
- return setupNoRandPartC();
- } else {
- return setupNoRandPartA();
- }
- }
-
- private int setupNoRandPartC() throws IOException {
- if (this.su_j2 < this.su_z) {
- final int su_ch2Shadow = this.su_ch2;
- this.crc.updateCRC(su_ch2Shadow);
- this.su_j2++;
- this.currentState = NO_RAND_PART_C_STATE;
- return su_ch2Shadow;
- }
- this.su_i2++;
- this.su_count = 0;
- return setupNoRandPartA();
- }
-
- private static final class Data {
-
- // (with blockSize 900k)
- final boolean[] inUse = new boolean[256]; // 256 byte
-
- final byte[] seqToUnseq = new byte[256]; // 256 byte
- final byte[] selector = new byte[MAX_SELECTORS]; // 18002 byte
- final byte[] selectorMtf = new byte[MAX_SELECTORS]; // 18002 byte
-
- /**
- * Freq table collected to save a pass over the data during
- * decompression.
- */
- final int[] unzftab = new int[256]; // 1024 byte
-
- final int[][] limit = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
- final int[][] base = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
- final int[][] perm = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
- final int[] minLens = new int[N_GROUPS]; // 24 byte
-
- final int[] cftab = new int[257]; // 1028 byte
- final char[] getAndMoveToFrontDecode_yy = new char[256]; // 512 byte
- final char[][] temp_charArray2d = new char[N_GROUPS][MAX_ALPHA_SIZE]; // 3096 byte
- final byte[] recvDecodingTables_pos = new byte[N_GROUPS]; // 6 byte
- // ---------------
- // 60798 byte
-
- int[] tt; // 3600000 byte
- byte[] ll8; // 900000 byte
-
- // ---------------
- // 4560782 byte
- // ===============
-
- Data(final int blockSize100k) {
- this.ll8 = new byte[blockSize100k * BZip2Constants.BASEBLOCKSIZE];
- }
-
- /**
- * Initializes the {@link #tt} array.
- *
- * This method is called when the required length of the array is known.
- * I don't initialize it at construction time to avoid unnecessary
- * memory allocation when compressing small files.
- */
- int[] initTT(final int length) {
- int[] ttShadow = this.tt;
-
- // tt.length should always be >= length, but theoretically
- // it can happen, if the compressor mixed small and large
- // blocks. Normally only the last block will be smaller
- // than others.
- if ((ttShadow == null) || (ttShadow.length < length)) {
- this.tt = ttShadow = new int[length];
- }
-
- return ttShadow;
- }
-
- }
-
- /**
- * Checks if the signature matches what is expected for a bzip2 file.
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is a bzip2 compressed stream, false otherwise
- *
- * @since 1.1
- */
- public static boolean matches(final byte[] signature, final int length) {
- return length >= 3 && signature[0] == 'B' &&
- signature[1] == 'Z' && signature[2] == 'h';
- }
-}
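A short usage sketch for the decompressor deleted above, including the `decompressConcatenated` flag documented in its constructor (file names are illustrative):

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;

public class BZip2DecodeDemo {
    public static void main(String[] args) throws IOException {
        try (InputStream in = Files.newInputStream(Paths.get("archive.tar.bz2"));
             // true: keep decoding across concatenated .bz2 streams instead of
             // stopping after the first one
             BZip2CompressorInputStream bz = new BZip2CompressorInputStream(in, true);
             OutputStream out = Files.newOutputStream(Paths.get("archive.tar"))) {
            byte[] buf = new byte[8192];
            int n;
            while ((n = bz.read(buf, 0, buf.length)) != -1) {
                out.write(buf, 0, n);
            }
        }
    }
}
```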
diff --git a/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorOutputStream.java b/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorOutputStream.java
deleted file mode 100644
index ba2beb16469..00000000000
--- a/src/org/apache/commons/compress/compressors/bzip2/BZip2CompressorOutputStream.java
+++ /dev/null
@@ -1,1334 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.bzip2;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-
-/**
- * An output stream that compresses data in the BZip2 format and writes it to
- * another stream.
- *
- * <p>The compression requires large amounts of memory. Thus you should call the
- * {@link #close() close()} method as soon as possible, to force
- * {@code BZip2CompressorOutputStream} to release the allocated memory.</p>
- *
- * <p>You can shrink the amount of allocated memory and maybe raise
- * the compression speed by choosing a lower blocksize, which in turn
- * may cause a lower compression ratio. You can avoid unnecessary
- * memory allocation by avoiding using a blocksize which is bigger
- * than the size of the input.</p>
- *
- * <p>You can compute the memory usage for compressing by the
- * following formula:</p>
- *
- * <code>400k + (9 * blocksize)</code>.
- *
- * <p>To get the memory required for decompression by {@link
- * BZip2CompressorInputStream} use</p>
- *
- * <code>65k + (5 * blocksize)</code>.
- *
- * <table border="1">
- * <caption>Memory usage by blocksize</caption>
- * <tr><th>Blocksize</th><th>Compression memory usage</th><th>Decompression memory usage</th></tr>
- * <tr><td>100k</td><td>1300k</td><td>565k</td></tr>
- * <tr><td>200k</td><td>2200k</td><td>1065k</td></tr>
- * <tr><td>300k</td><td>3100k</td><td>1565k</td></tr>
- * <tr><td>400k</td><td>4000k</td><td>2065k</td></tr>
- * <tr><td>500k</td><td>4900k</td><td>2565k</td></tr>
- * <tr><td>600k</td><td>5800k</td><td>3065k</td></tr>
- * <tr><td>700k</td><td>6700k</td><td>3565k</td></tr>
- * <tr><td>800k</td><td>7600k</td><td>4065k</td></tr>
- * <tr><td>900k</td><td>8500k</td><td>4565k</td></tr>
- * </table>
- *
- * <p>For decompression {@code BZip2CompressorInputStream} allocates less memory if the
- * bzipped input is smaller than one block.</p>
- *
- * <p>Instances of this class are not threadsafe.</p>
- *
- * <p>TODO: Update to BZip2 1.0.1</p>
- *
- * @NotThreadSafe
- */
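As a cross-check, the two formulas in the Javadoc above reproduce the table rows exactly, e.g. 400k + 9 × 900k = 8500k and 65k + 5 × 900k = 4565k for the 900k row. A minimal sketch (hypothetical helper class, not part of the deleted sources):

```java
/** Hypothetical helper reproducing the memory-usage table above (sizes in "k" as the Javadoc uses them). */
public final class BZip2MemoryEstimate {
    static long compressionK(int blockSizeK) { return 400L + 9L * blockSizeK; }
    static long decompressionK(int blockSizeK) { return 65L + 5L * blockSizeK; }

    public static void main(String[] args) {
        // 900k blocksize -> 8500k compression, 4565k decompression, matching the table
        System.out.println(compressionK(900) + "k / " + decompressionK(900) + "k");
    }
}
```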
-public class BZip2CompressorOutputStream extends CompressorOutputStream
- implements BZip2Constants {
-
- /**
- * The minimum supported blocksize {@code == 1}.
- */
- public static final int MIN_BLOCKSIZE = 1;
-
- /**
- * The maximum supported blocksize {@code == 9}.
- */
- public static final int MAX_BLOCKSIZE = 9;
-
- private static final int GREATER_ICOST = 15;
- private static final int LESSER_ICOST = 0;
-
- private static void hbMakeCodeLengths(final byte[] len, final int[] freq,
- final Data dat, final int alphaSize,
- final int maxLen) {
- /*
- * Nodes and heap entries run from 1. Entry 0 for both the heap and
- * nodes is a sentinel.
- */
- final int[] heap = dat.heap;
- final int[] weight = dat.weight;
- final int[] parent = dat.parent;
-
- for (int i = alphaSize; --i >= 0;) {
- weight[i + 1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
- }
-
- for (boolean tooLong = true; tooLong;) {
- tooLong = false;
-
- int nNodes = alphaSize;
- int nHeap = 0;
- heap[0] = 0;
- weight[0] = 0;
- parent[0] = -2;
-
- for (int i = 1; i <= alphaSize; i++) {
- parent[i] = -1;
- nHeap++;
- heap[nHeap] = i;
-
- int zz = nHeap;
- final int tmp = heap[zz];
- while (weight[tmp] < weight[heap[zz >> 1]]) {
- heap[zz] = heap[zz >> 1];
- zz >>= 1;
- }
- heap[zz] = tmp;
- }
-
- while (nHeap > 1) {
- final int n1 = heap[1];
- heap[1] = heap[nHeap];
- nHeap--;
-
- int yy = 0;
- int zz = 1;
- int tmp = heap[1];
-
- while (true) {
- yy = zz << 1;
-
- if (yy > nHeap) {
- break;
- }
-
- if ((yy < nHeap)
- && (weight[heap[yy + 1]] < weight[heap[yy]])) {
- yy++;
- }
-
- if (weight[tmp] < weight[heap[yy]]) {
- break;
- }
-
- heap[zz] = heap[yy];
- zz = yy;
- }
-
- heap[zz] = tmp;
-
- final int n2 = heap[1];
- heap[1] = heap[nHeap];
- nHeap--;
-
- yy = 0;
- zz = 1;
- tmp = heap[1];
-
- while (true) {
- yy = zz << 1;
-
- if (yy > nHeap) {
- break;
- }
-
- if ((yy < nHeap)
- && (weight[heap[yy + 1]] < weight[heap[yy]])) {
- yy++;
- }
-
- if (weight[tmp] < weight[heap[yy]]) {
- break;
- }
-
- heap[zz] = heap[yy];
- zz = yy;
- }
-
- heap[zz] = tmp;
- nNodes++;
- parent[n1] = parent[n2] = nNodes;
-
- final int weight_n1 = weight[n1];
- final int weight_n2 = weight[n2];
- weight[nNodes] = ((weight_n1 & 0xffffff00)
- + (weight_n2 & 0xffffff00))
- | (1 + (((weight_n1 & 0x000000ff)
- > (weight_n2 & 0x000000ff))
- ? (weight_n1 & 0x000000ff)
- : (weight_n2 & 0x000000ff)));
-
- parent[nNodes] = -1;
- nHeap++;
- heap[nHeap] = nNodes;
-
- tmp = 0;
- zz = nHeap;
- tmp = heap[zz];
- final int weight_tmp = weight[tmp];
- while (weight_tmp < weight[heap[zz >> 1]]) {
- heap[zz] = heap[zz >> 1];
- zz >>= 1;
- }
- heap[zz] = tmp;
-
- }
-
- for (int i = 1; i <= alphaSize; i++) {
- int j = 0;
- int k = i;
-
- for (int parent_k; (parent_k = parent[k]) >= 0;) {
- k = parent_k;
- j++;
- }
-
- len[i - 1] = (byte) j;
- if (j > maxLen) {
- tooLong = true;
- }
- }
-
- if (tooLong) {
- for (int i = 1; i < alphaSize; i++) {
- int j = weight[i] >> 8;
- j = 1 + (j >> 1);
- weight[i] = j << 8;
- }
- }
- }
- }
-
- /**
- * Index of the last char in the block, so the block size == last + 1.
- */
- private int last;
-
- /**
- * Always: in the range 0 .. 9. The current block size is 100000 * this
- * number.
- */
- private final int blockSize100k;
-
- private int bsBuff;
- private int bsLive;
- private final CRC crc = new CRC();
-
- private int nInUse;
-
- private int nMTF;
-
- private int currentChar = -1;
- private int runLength = 0;
-
- private int blockCRC;
- private int combinedCRC;
- private final int allowableBlockSize;
-
- /**
- * All memory intensive stuff.
- */
- private Data data;
- private BlockSort blockSorter;
-
- private OutputStream out;
- private volatile boolean closed;
-
- /**
- * Chooses a blocksize based on the given length of the data to compress.
- *
- * @return The blocksize, between {@link #MIN_BLOCKSIZE} and
- * {@link #MAX_BLOCKSIZE} both inclusive. For a negative
- * {@code inputLength} this method returns {@code MAX_BLOCKSIZE}
- * always.
- *
- * @param inputLength
- * The length of the data which will be compressed by
- * {@code BZip2CompressorOutputStream}.
- */
- public static int chooseBlockSize(final long inputLength) {
- return (inputLength > 0) ? (int) Math
- .min((inputLength / 132000) + 1, 9) : MAX_BLOCKSIZE;
- }
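`chooseBlockSize` above picks roughly one 100k block-size unit per 132 kB of input, capped at 9, so for example a 500 kB input yields 500000 / 132000 + 1 = 4. A hedged usage sketch (file names are illustrative):

```java
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;

public class BZip2EncodeDemo {
    public static void main(String[] args) throws IOException {
        Path src = Paths.get("archive.tar");
        // Size the blocks to the input instead of always using the 900k default,
        // saving compression memory for small inputs.
        int blockSize = BZip2CompressorOutputStream.chooseBlockSize(Files.size(src));
        try (OutputStream out = Files.newOutputStream(Paths.get("archive.tar.bz2"));
             BZip2CompressorOutputStream bz = new BZip2CompressorOutputStream(out, blockSize)) {
            Files.copy(src, bz);
        }
    }
}
```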
-
- /**
- * Constructs a new {@code BZip2CompressorOutputStream} with a blocksize of 900k.
- *
- * @param out
- * the destination stream.
- *
- * @throws IOException
- * if an I/O error occurs in the specified stream.
- * @throws NullPointerException
- * if out == null.
- */
- public BZip2CompressorOutputStream(final OutputStream out)
- throws IOException {
- this(out, MAX_BLOCKSIZE);
- }
-
- /**
- * Constructs a new {@code BZip2CompressorOutputStream} with specified blocksize.
- *
- * @param out
- * the destination stream.
- * @param blockSize
- * the blockSize as 100k units.
- *
- * @throws IOException
- * if an I/O error occurs in the specified stream.
- * @throws IllegalArgumentException
- * if (blockSize < 1) || (blockSize > 9).
- * @throws NullPointerException
- * if out == null.
- *
- * @see #MIN_BLOCKSIZE
- * @see #MAX_BLOCKSIZE
- */
- public BZip2CompressorOutputStream(final OutputStream out, final int blockSize) throws IOException {
- if (blockSize < 1) {
- throw new IllegalArgumentException("blockSize(" + blockSize + ") < 1");
- }
- if (blockSize > 9) {
- throw new IllegalArgumentException("blockSize(" + blockSize + ") > 9");
- }
-
- this.blockSize100k = blockSize;
- this.out = out;
-
- /* 20 is just a paranoia constant */
- this.allowableBlockSize = (this.blockSize100k * BZip2Constants.BASEBLOCKSIZE) - 20;
- init();
- }
-
- @Override
- public void write(final int b) throws IOException {
- if (!closed) {
- write0(b);
- } else {
- throw new IOException("closed");
- }
- }
-
- /**
- * Writes the current byte to the buffer, run-length encoding it
- * if it has been repeated at least four times (the first step
- * RLEs sequences of four identical bytes).
- *
- * <p>Flushes the current block before writing data if it is
- * full.</p>
- *
- * <p>"write to the buffer" means adding to data.block starting
- * two steps "after" this.last - initially starting at index 1
- * (not 0) - and updating this.last to point to the last index
- * written minus 1.</p>
- */
- private void writeRun() throws IOException {
- final int lastShadow = this.last;
-
- if (lastShadow < this.allowableBlockSize) {
- final int currentCharShadow = this.currentChar;
- final Data dataShadow = this.data;
- dataShadow.inUse[currentCharShadow] = true;
- final byte ch = (byte) currentCharShadow;
-
- int runLengthShadow = this.runLength;
- this.crc.updateCRC(currentCharShadow, runLengthShadow);
-
- switch (runLengthShadow) {
- case 1:
- dataShadow.block[lastShadow + 2] = ch;
- this.last = lastShadow + 1;
- break;
-
- case 2:
- dataShadow.block[lastShadow + 2] = ch;
- dataShadow.block[lastShadow + 3] = ch;
- this.last = lastShadow + 2;
- break;
-
- case 3: {
- final byte[] block = dataShadow.block;
- block[lastShadow + 2] = ch;
- block[lastShadow + 3] = ch;
- block[lastShadow + 4] = ch;
- this.last = lastShadow + 3;
- }
- break;
-
- default: {
- runLengthShadow -= 4;
- dataShadow.inUse[runLengthShadow] = true;
- final byte[] block = dataShadow.block;
- block[lastShadow + 2] = ch;
- block[lastShadow + 3] = ch;
- block[lastShadow + 4] = ch;
- block[lastShadow + 5] = ch;
- block[lastShadow + 6] = (byte) runLengthShadow;
- this.last = lastShadow + 5;
- }
- break;
-
- }
- } else {
- endBlock();
- initBlock();
- writeRun();
- }
- }
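The `write0`/`writeRun` pair above implements bzip2's first-stage RLE: runs shorter than four identical bytes are copied verbatim, while runs of four or more become four literal bytes plus a count byte (run length minus four, with total run length capped at 255). A standalone sketch of that encoding, under those stated assumptions:

```java
import java.io.ByteArrayOutputStream;

// Standalone sketch of bzip2's first-stage RLE as performed by write0()/writeRun() above.
final class Bzip2Rle {
    static byte[] encode(byte[] in) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        int i = 0;
        while (i < in.length) {
            int run = 1;
            while (i + run < in.length && in[i + run] == in[i] && run < 255) {
                run++;
            }
            int literals = Math.min(run, 4);
            for (int j = 0; j < literals; j++) {
                out.write(in[i]);
            }
            if (run >= 4) {
                out.write(run - 4); // count byte: extra repeats beyond the 4 literals
            }
            i += run;
        }
        return out.toByteArray();
    }
}
```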
-
- /**
- * Overridden to warn about an unclosed stream.
- */
- @Override
- protected void finalize() throws Throwable {
- if (!closed) {
- System.err.println("Unclosed BZip2CompressorOutputStream detected, will *not* close it");
- }
- super.finalize();
- }
-
-
- public void finish() throws IOException {
- if (!closed) {
- closed = true;
- try {
- if (this.runLength > 0) {
- writeRun();
- }
- this.currentChar = -1;
- endBlock();
- endCompression();
- } finally {
- this.out = null;
- this.blockSorter = null;
- this.data = null;
- }
- }
- }
-
- @Override
- public void close() throws IOException {
- if (!closed) {
- final OutputStream outShadow = this.out;
- finish();
- outShadow.close();
- }
- }
-
- @Override
- public void flush() throws IOException {
- final OutputStream outShadow = this.out;
- if (outShadow != null) {
- outShadow.flush();
- }
- }
-
- /**
- * Writes the magic bytes "BZ" at the start of the stream,
- * followed by "h" indicating the (Huffman-coded) file format
- * and a digit indicating blockSize100k.
- * @throws IOException if the magic bytes could not be written
- */
- private void init() throws IOException {
- bsPutUByte('B');
- bsPutUByte('Z');
-
- this.data = new Data(this.blockSize100k);
- this.blockSorter = new BlockSort(this.data);
-
- // huffmanised magic bytes
- bsPutUByte('h');
- bsPutUByte('0' + this.blockSize100k);
-
- this.combinedCRC = 0;
- initBlock();
- }
-
- private void initBlock() {
- // blockNo++;
- this.crc.initialiseCRC();
- this.last = -1;
- // ch = 0;
-
- final boolean[] inUse = this.data.inUse;
- for (int i = 256; --i >= 0;) {
- inUse[i] = false;
- }
-
- }
-
- private void endBlock() throws IOException {
- this.blockCRC = this.crc.getFinalCRC();
- this.combinedCRC = (this.combinedCRC << 1) | (this.combinedCRC >>> 31);
- this.combinedCRC ^= this.blockCRC;
-
- // empty block at end of file
- if (this.last == -1) {
- return;
- }
-
- /* sort the block and establish posn of original string */
- blockSort();
-
- /*
- * A 6-byte block header, the value chosen arbitrarily as 0x314159265359
- * :-). A 32 bit value does not really give a strong enough guarantee
- * that the value will not appear by chance in the compressed
- * datastream. Worst-case probability of this event, for a 900k block,
- * is about 2.0e-3 for 32 bits, 1.0e-5 for 40 bits and 4.0e-8 for 48
- * bits. For a compressed file of size 100Gb -- about 100000 blocks --
- * only a 48-bit marker will do. NB: normal compression/decompression
- * do not rely on these statistical properties. They are only important
- * when trying to recover blocks from damaged files.
- */
- bsPutUByte(0x31);
- bsPutUByte(0x41);
- bsPutUByte(0x59);
- bsPutUByte(0x26);
- bsPutUByte(0x53);
- bsPutUByte(0x59);
-
- /* Now the block's CRC, so it is in a known place. */
- bsPutInt(this.blockCRC);
-
- /* Now a single bit indicating no randomisation. */
- bsW(1, 0);
-
- /* Finally, block's contents proper. */
- moveToFrontCodeAndSend();
- }
-
- private void endCompression() throws IOException {
- /*
- * Now another magic 48-bit number, 0x177245385090, to indicate the end
- * of the last block. (sqrt(pi), if you want to know. I did want to use
- * e, but it contains too much repetition -- 27 18 28 18 28 46 -- for me
- * to feel statistically comfortable. Call me paranoid.)
- */
- bsPutUByte(0x17);
- bsPutUByte(0x72);
- bsPutUByte(0x45);
- bsPutUByte(0x38);
- bsPutUByte(0x50);
- bsPutUByte(0x90);
-
- bsPutInt(this.combinedCRC);
- bsFinishedWithStream();
- }
-
- /**
- * Returns the blocksize parameter specified at construction time.
- * @return the blocksize parameter specified at construction time
- */
- public final int getBlockSize() {
- return this.blockSize100k;
- }
-
- @Override
- public void write(final byte[] buf, int offs, final int len)
- throws IOException {
- if (offs < 0) {
- throw new IndexOutOfBoundsException("offs(" + offs + ") < 0.");
- }
- if (len < 0) {
- throw new IndexOutOfBoundsException("len(" + len + ") < 0.");
- }
- if (offs + len > buf.length) {
- throw new IndexOutOfBoundsException("offs(" + offs + ") + len("
- + len + ") > buf.length("
- + buf.length + ").");
- }
- if (closed) {
- throw new IOException("stream closed");
- }
-
- for (final int hi = offs + len; offs < hi;) {
- write0(buf[offs++]);
- }
- }
-
- /**
- * Keeps track of the last bytes written and implicitly performs
- * run-length encoding as the first step of the bzip2 algorithm.
- */
- private void write0(int b) throws IOException {
- if (this.currentChar != -1) {
- b &= 0xff;
- if (this.currentChar == b) {
- if (++this.runLength > 254) {
- writeRun();
- this.currentChar = -1;
- this.runLength = 0;
- }
- // else nothing to do
- } else {
- writeRun();
- this.runLength = 1;
- this.currentChar = b;
- }
- } else {
- this.currentChar = b & 0xff;
- this.runLength++;
- }
- }
-
- private static void hbAssignCodes(final int[] code, final byte[] length,
- final int minLen, final int maxLen,
- final int alphaSize) {
- int vec = 0;
- for (int n = minLen; n <= maxLen; n++) {
- for (int i = 0; i < alphaSize; i++) {
- if ((length[i] & 0xff) == n) {
- code[i] = vec;
- vec++;
- }
- }
- vec <<= 1;
- }
- }
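`hbAssignCodes` above is canonical Huffman code assignment: within each code length, symbols receive consecutive integer codes, and the running value is doubled when moving to the next length. A tiny worked example with assumed lengths (not data from any real stream):

```java
// Worked example of the canonical code assignment in hbAssignCodes above.
// Symbols a,b,c,d with code lengths {1,2,3,3} receive codes 0, 10, 110, 111.
public class CanonicalCodesDemo {
    public static void main(String[] args) {
        byte[] length = {1, 2, 3, 3};
        int[] code = new int[length.length];
        int vec = 0;
        for (int n = 1; n <= 3; n++) {          // minLen..maxLen
            for (int i = 0; i < length.length; i++) {
                if ((length[i] & 0xff) == n) {
                    code[i] = vec++;            // consecutive codes within one length
                }
            }
            vec <<= 1;                          // double when codes grow by one bit
        }
        for (int i = 0; i < code.length; i++) {
            // Pad to the code length by setting a sentinel high bit, then drop it.
            System.out.println(Integer.toBinaryString(code[i] | (1 << length[i])).substring(1));
        }
    }
}
```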
-
- private void bsFinishedWithStream() throws IOException {
- while (this.bsLive > 0) {
- final int ch = this.bsBuff >> 24;
- this.out.write(ch); // write 8-bit
- this.bsBuff <<= 8;
- this.bsLive -= 8;
- }
- }
-
- private void bsW(final int n, final int v) throws IOException {
- final OutputStream outShadow = this.out;
- int bsLiveShadow = this.bsLive;
- int bsBuffShadow = this.bsBuff;
-
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24); // write 8-bit
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
-
- this.bsBuff = bsBuffShadow | (v << (32 - bsLiveShadow - n));
- this.bsLive = bsLiveShadow + n;
- }
-
- private void bsPutUByte(final int c) throws IOException {
- bsW(8, c);
- }
-
- private void bsPutInt(final int u) throws IOException {
- bsW(8, (u >> 24) & 0xff);
- bsW(8, (u >> 16) & 0xff);
- bsW(8, (u >> 8) & 0xff);
- bsW(8, u & 0xff);
- }
-
- private void sendMTFValues() throws IOException {
- final byte[][] len = this.data.sendMTFValues_len;
- final int alphaSize = this.nInUse + 2;
-
- for (int t = N_GROUPS; --t >= 0;) {
- final byte[] len_t = len[t];
- for (int v = alphaSize; --v >= 0;) {
- len_t[v] = GREATER_ICOST;
- }
- }
-
- /* Decide how many coding tables to use */
- // assert (this.nMTF > 0) : this.nMTF;
- final int nGroups = (this.nMTF < 200) ? 2 : (this.nMTF < 600) ? 3
- : (this.nMTF < 1200) ? 4 : (this.nMTF < 2400) ? 5 : 6;
-
- /* Generate an initial set of coding tables */
- sendMTFValues0(nGroups, alphaSize);
-
- /*
- * Iterate up to N_ITERS times to improve the tables.
- */
- final int nSelectors = sendMTFValues1(nGroups, alphaSize);
-
- /* Compute MTF values for the selectors. */
- sendMTFValues2(nGroups, nSelectors);
-
- /* Assign actual codes for the tables. */
- sendMTFValues3(nGroups, alphaSize);
-
- /* Transmit the mapping table. */
- sendMTFValues4();
-
- /* Now the selectors. */
- sendMTFValues5(nGroups, nSelectors);
-
- /* Now the coding tables. */
- sendMTFValues6(nGroups, alphaSize);
-
- /* And finally, the block data proper */
- sendMTFValues7();
- }
-
- private void sendMTFValues0(final int nGroups, final int alphaSize) {
- final byte[][] len = this.data.sendMTFValues_len;
- final int[] mtfFreq = this.data.mtfFreq;
-
- int remF = this.nMTF;
- int gs = 0;
-
- for (int nPart = nGroups; nPart > 0; nPart--) {
- final int tFreq = remF / nPart;
- int ge = gs - 1;
- int aFreq = 0;
-
- for (final int a = alphaSize - 1; (aFreq < tFreq) && (ge < a);) {
- aFreq += mtfFreq[++ge];
- }
-
- if ((ge > gs) && (nPart != nGroups) && (nPart != 1)
- && (((nGroups - nPart) & 1) != 0)) {
- aFreq -= mtfFreq[ge--];
- }
-
- final byte[] len_np = len[nPart - 1];
- for (int v = alphaSize; --v >= 0;) {
- if ((v >= gs) && (v <= ge)) {
- len_np[v] = LESSER_ICOST;
- } else {
- len_np[v] = GREATER_ICOST;
- }
- }
-
- gs = ge + 1;
- remF -= aFreq;
- }
- }
-
- private int sendMTFValues1(final int nGroups, final int alphaSize) {
- final Data dataShadow = this.data;
- final int[][] rfreq = dataShadow.sendMTFValues_rfreq;
- final int[] fave = dataShadow.sendMTFValues_fave;
- final short[] cost = dataShadow.sendMTFValues_cost;
- final char[] sfmap = dataShadow.sfmap;
- final byte[] selector = dataShadow.selector;
- final byte[][] len = dataShadow.sendMTFValues_len;
- final byte[] len_0 = len[0];
- final byte[] len_1 = len[1];
- final byte[] len_2 = len[2];
- final byte[] len_3 = len[3];
- final byte[] len_4 = len[4];
- final byte[] len_5 = len[5];
- final int nMTFShadow = this.nMTF;
-
- int nSelectors = 0;
-
- for (int iter = 0; iter < N_ITERS; iter++) {
- for (int t = nGroups; --t >= 0;) {
- fave[t] = 0;
- final int[] rfreqt = rfreq[t];
- for (int i = alphaSize; --i >= 0;) {
- rfreqt[i] = 0;
- }
- }
-
- nSelectors = 0;
-
- for (int gs = 0; gs < this.nMTF;) {
- /* Set group start & end marks. */
-
- /*
- * Calculate the cost of this group as coded by each of the
- * coding tables.
- */
-
- final int ge = Math.min(gs + G_SIZE - 1, nMTFShadow - 1);
-
- if (nGroups == N_GROUPS) {
- // unrolled version of the else-block
-
- short cost0 = 0;
- short cost1 = 0;
- short cost2 = 0;
- short cost3 = 0;
- short cost4 = 0;
- short cost5 = 0;
-
- for (int i = gs; i <= ge; i++) {
- final int icv = sfmap[i];
- cost0 += len_0[icv] & 0xff;
- cost1 += len_1[icv] & 0xff;
- cost2 += len_2[icv] & 0xff;
- cost3 += len_3[icv] & 0xff;
- cost4 += len_4[icv] & 0xff;
- cost5 += len_5[icv] & 0xff;
- }
-
- cost[0] = cost0;
- cost[1] = cost1;
- cost[2] = cost2;
- cost[3] = cost3;
- cost[4] = cost4;
- cost[5] = cost5;
-
- } else {
- for (int t = nGroups; --t >= 0;) {
- cost[t] = 0;
- }
-
- for (int i = gs; i <= ge; i++) {
- final int icv = sfmap[i];
- for (int t = nGroups; --t >= 0;) {
- cost[t] += len[t][icv] & 0xff;
- }
- }
- }
-
- /*
- * Find the coding table which is best for this group, and
- * record its identity in the selector table.
- */
- int bt = -1;
- for (int t = nGroups, bc = 999999999; --t >= 0;) {
- final int cost_t = cost[t];
- if (cost_t < bc) {
- bc = cost_t;
- bt = t;
- }
- }
-
- fave[bt]++;
- selector[nSelectors] = (byte) bt;
- nSelectors++;
-
- /*
- * Increment the symbol frequencies for the selected table.
- */
- final int[] rfreq_bt = rfreq[bt];
- for (int i = gs; i <= ge; i++) {
- rfreq_bt[sfmap[i]]++;
- }
-
- gs = ge + 1;
- }
-
- /*
- * Recompute the tables based on the accumulated frequencies.
- */
- for (int t = 0; t < nGroups; t++) {
- hbMakeCodeLengths(len[t], rfreq[t], this.data, alphaSize, 20);
- }
- }
-
- return nSelectors;
- }
-
- private void sendMTFValues2(final int nGroups, final int nSelectors) {
- // assert (nGroups < 8) : nGroups;
-
- final Data dataShadow = this.data;
- final byte[] pos = dataShadow.sendMTFValues2_pos;
-
- for (int i = nGroups; --i >= 0;) {
- pos[i] = (byte) i;
- }
-
- for (int i = 0; i < nSelectors; i++) {
- final byte ll_i = dataShadow.selector[i];
- byte tmp = pos[0];
- int j = 0;
-
- while (ll_i != tmp) {
- j++;
- final byte tmp2 = tmp;
- tmp = pos[j];
- pos[j] = tmp2;
- }
-
- pos[0] = tmp;
- dataShadow.selectorMtf[i] = (byte) j;
- }
- }
-
- private void sendMTFValues3(final int nGroups, final int alphaSize) {
- final int[][] code = this.data.sendMTFValues_code;
- final byte[][] len = this.data.sendMTFValues_len;
-
- for (int t = 0; t < nGroups; t++) {
- int minLen = 32;
- int maxLen = 0;
- final byte[] len_t = len[t];
- for (int i = alphaSize; --i >= 0;) {
- final int l = len_t[i] & 0xff;
- if (l > maxLen) {
- maxLen = l;
- }
- if (l < minLen) {
- minLen = l;
- }
- }
-
- // assert (maxLen <= 20) : maxLen;
- // assert (minLen >= 1) : minLen;
-
- hbAssignCodes(code[t], len[t], minLen, maxLen, alphaSize);
- }
- }
-
- private void sendMTFValues4() throws IOException {
- final boolean[] inUse = this.data.inUse;
- final boolean[] inUse16 = this.data.sentMTFValues4_inUse16;
-
- for (int i = 16; --i >= 0;) {
- inUse16[i] = false;
- final int i16 = i * 16;
- for (int j = 16; --j >= 0;) {
- if (inUse[i16 + j]) {
- inUse16[i] = true;
- }
- }
- }
-
- for (int i = 0; i < 16; i++) {
- bsW(1, inUse16[i] ? 1 : 0);
- }
-
- final OutputStream outShadow = this.out;
- int bsLiveShadow = this.bsLive;
- int bsBuffShadow = this.bsBuff;
-
- for (int i = 0; i < 16; i++) {
- if (inUse16[i]) {
- final int i16 = i * 16;
- for (int j = 0; j < 16; j++) {
- // inlined: bsW(1, inUse[i16 + j] ? 1 : 0);
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24); // write 8-bit
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
- if (inUse[i16 + j]) {
- bsBuffShadow |= 1 << (32 - bsLiveShadow - 1);
- }
- bsLiveShadow++;
- }
- }
- }
-
- this.bsBuff = bsBuffShadow;
- this.bsLive = bsLiveShadow;
- }
-
- private void sendMTFValues5(final int nGroups, final int nSelectors)
- throws IOException {
- bsW(3, nGroups);
- bsW(15, nSelectors);
-
- final OutputStream outShadow = this.out;
- final byte[] selectorMtf = this.data.selectorMtf;
-
- int bsLiveShadow = this.bsLive;
- int bsBuffShadow = this.bsBuff;
-
- for (int i = 0; i < nSelectors; i++) {
- for (int j = 0, hj = selectorMtf[i] & 0xff; j < hj; j++) {
- // inlined: bsW(1, 1);
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24);
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
- bsBuffShadow |= 1 << (32 - bsLiveShadow - 1);
- bsLiveShadow++;
- }
-
- // inlined: bsW(1, 0);
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24);
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
- // bsBuffShadow |= 0 << (32 - bsLiveShadow - 1);
- bsLiveShadow++;
- }
-
- this.bsBuff = bsBuffShadow;
- this.bsLive = bsLiveShadow;
- }
-
- private void sendMTFValues6(final int nGroups, final int alphaSize)
- throws IOException {
- final byte[][] len = this.data.sendMTFValues_len;
- final OutputStream outShadow = this.out;
-
- int bsLiveShadow = this.bsLive;
- int bsBuffShadow = this.bsBuff;
-
- for (int t = 0; t < nGroups; t++) {
- final byte[] len_t = len[t];
- int curr = len_t[0] & 0xff;
-
- // inlined: bsW(5, curr);
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24); // write 8-bit
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
- bsBuffShadow |= curr << (32 - bsLiveShadow - 5);
- bsLiveShadow += 5;
-
- for (int i = 0; i < alphaSize; i++) {
- final int lti = len_t[i] & 0xff;
- while (curr < lti) {
- // inlined: bsW(2, 2);
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24); // write 8-bit
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
- bsBuffShadow |= 2 << (32 - bsLiveShadow - 2);
- bsLiveShadow += 2;
-
- curr++; /* 10 */
- }
-
- while (curr > lti) {
- // inlined: bsW(2, 3);
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24); // write 8-bit
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
- bsBuffShadow |= 3 << (32 - bsLiveShadow - 2);
- bsLiveShadow += 2;
-
- curr--; /* 11 */
- }
-
- // inlined: bsW(1, 0);
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24); // write 8-bit
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
- // bsBuffShadow |= 0 << (32 - bsLiveShadow - 1);
- bsLiveShadow++;
- }
- }
-
- this.bsBuff = bsBuffShadow;
- this.bsLive = bsLiveShadow;
- }
-
- private void sendMTFValues7() throws IOException {
- final Data dataShadow = this.data;
- final byte[][] len = dataShadow.sendMTFValues_len;
- final int[][] code = dataShadow.sendMTFValues_code;
- final OutputStream outShadow = this.out;
- final byte[] selector = dataShadow.selector;
- final char[] sfmap = dataShadow.sfmap;
- final int nMTFShadow = this.nMTF;
-
- int selCtr = 0;
-
- int bsLiveShadow = this.bsLive;
- int bsBuffShadow = this.bsBuff;
-
- for (int gs = 0; gs < nMTFShadow;) {
- final int ge = Math.min(gs + G_SIZE - 1, nMTFShadow - 1);
- final int selector_selCtr = selector[selCtr] & 0xff;
- final int[] code_selCtr = code[selector_selCtr];
- final byte[] len_selCtr = len[selector_selCtr];
-
- while (gs <= ge) {
- final int sfmap_i = sfmap[gs];
-
- //
- // inlined: bsW(len_selCtr[sfmap_i] & 0xff,
- // code_selCtr[sfmap_i]);
- //
- while (bsLiveShadow >= 8) {
- outShadow.write(bsBuffShadow >> 24);
- bsBuffShadow <<= 8;
- bsLiveShadow -= 8;
- }
- final int n = len_selCtr[sfmap_i] & 0xFF;
- bsBuffShadow |= code_selCtr[sfmap_i] << (32 - bsLiveShadow - n);
- bsLiveShadow += n;
-
- gs++;
- }
-
- gs = ge + 1;
- selCtr++;
- }
-
- this.bsBuff = bsBuffShadow;
- this.bsLive = bsLiveShadow;
- }
-
- private void moveToFrontCodeAndSend() throws IOException {
- bsW(24, this.data.origPtr);
- generateMTFValues();
- sendMTFValues();
- }
-
- private void blockSort() {
- blockSorter.blockSort(data, last);
- }
-
- /*
- * Performs Move-To-Front on the Burrows-Wheeler transformed
- * buffer, storing the MTFed data in data.sfmap in RUNA/RUNB
- * run-length-encoded form.
- *
- *
- * Keeps track of byte frequencies in data.mtfFreq at the same time.
- */
- private void generateMTFValues() {
- final int lastShadow = this.last;
- final Data dataShadow = this.data;
- final boolean[] inUse = dataShadow.inUse;
- final byte[] block = dataShadow.block;
- final int[] fmap = dataShadow.fmap;
- final char[] sfmap = dataShadow.sfmap;
- final int[] mtfFreq = dataShadow.mtfFreq;
- final byte[] unseqToSeq = dataShadow.unseqToSeq;
- final byte[] yy = dataShadow.generateMTFValues_yy;
-
- // make maps
- int nInUseShadow = 0;
- for (int i = 0; i < 256; i++) {
- if (inUse[i]) {
- unseqToSeq[i] = (byte) nInUseShadow;
- nInUseShadow++;
- }
- }
- this.nInUse = nInUseShadow;
-
- final int eob = nInUseShadow + 1;
-
- for (int i = eob; i >= 0; i--) {
- mtfFreq[i] = 0;
- }
-
- for (int i = nInUseShadow; --i >= 0;) {
- yy[i] = (byte) i;
- }
-
- int wr = 0;
- int zPend = 0;
-
- for (int i = 0; i <= lastShadow; i++) {
- final byte ll_i = unseqToSeq[block[fmap[i]] & 0xff];
- byte tmp = yy[0];
- int j = 0;
-
- while (ll_i != tmp) {
- j++;
- final byte tmp2 = tmp;
- tmp = yy[j];
- yy[j] = tmp2;
- }
- yy[0] = tmp;
-
- if (j == 0) {
- zPend++;
- } else {
- if (zPend > 0) {
- zPend--;
- while (true) {
- if ((zPend & 1) == 0) {
- sfmap[wr] = RUNA;
- wr++;
- mtfFreq[RUNA]++;
- } else {
- sfmap[wr] = RUNB;
- wr++;
- mtfFreq[RUNB]++;
- }
-
- if (zPend >= 2) {
- zPend = (zPend - 2) >> 1;
- } else {
- break;
- }
- }
- zPend = 0;
- }
- sfmap[wr] = (char) (j + 1);
- wr++;
- mtfFreq[j + 1]++;
- }
- }
-
- if (zPend > 0) {
- zPend--;
- while (true) {
- if ((zPend & 1) == 0) {
- sfmap[wr] = RUNA;
- wr++;
- mtfFreq[RUNA]++;
- } else {
- sfmap[wr] = RUNB;
- wr++;
- mtfFreq[RUNB]++;
- }
-
- if (zPend >= 2) {
- zPend = (zPend - 2) >> 1;
- } else {
- break;
- }
- }
- }
-
- sfmap[wr] = (char) eob;
- mtfFreq[eob]++;
- this.nMTF = wr + 1;
- }
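generateMTFValues interleaves two transforms: a move-to-front pass over the symbol alphabet (the yy array) and a run-length coding of zero runs into RUNA/RUNB symbols, which amounts to writing the run length in bijective base 2. A compact sketch of just the zero-run coding, reusing the same RUNA/RUNB constants (illustration only, not part of this patch):

```java
import java.util.ArrayList;
import java.util.List;

// Encodes a zero-run of length n as RUNA/RUNB symbols, i.e. n written in
// bijective base 2 with digit values RUNA=1 and RUNB=2 (illustration only).
final class RunLengthSketch {
    static final int RUNA = 0;
    static final int RUNB = 1;

    static List<Integer> encodeZeroRun(int n) {
        final List<Integer> symbols = new ArrayList<>();
        n--; // mirrors "zPend--" in generateMTFValues above
        while (true) {
            symbols.add((n & 1) == 0 ? RUNA : RUNB);
            if (n < 2) {
                break;
            }
            n = (n - 2) >> 1;
        }
        return symbols;
    }

    public static void main(final String[] args) {
        // 1 -> [RUNA], 2 -> [RUNB], 3 -> [RUNA, RUNA], 4 -> [RUNB, RUNA]
        for (int n = 1; n <= 4; n++) {
            System.out.println(n + " -> " + encodeZeroRun(n));
        }
    }
}
```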
-
- static final class Data {
-
- // with blockSize 900k
- /* maps unsigned byte => "does it occur in block" */
- final boolean[] inUse = new boolean[256]; // 256 byte
- final byte[] unseqToSeq = new byte[256]; // 256 byte
- final int[] mtfFreq = new int[MAX_ALPHA_SIZE]; // 1032 byte
- final byte[] selector = new byte[MAX_SELECTORS]; // 18002 byte
- final byte[] selectorMtf = new byte[MAX_SELECTORS]; // 18002 byte
-
- final byte[] generateMTFValues_yy = new byte[256]; // 256 byte
- final byte[][] sendMTFValues_len = new byte[N_GROUPS][MAX_ALPHA_SIZE]; // 1548 byte
- final int[][] sendMTFValues_rfreq = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
- final int[] sendMTFValues_fave = new int[N_GROUPS]; // 24 byte
- final short[] sendMTFValues_cost = new short[N_GROUPS]; // 12 byte
- final int[][] sendMTFValues_code = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
- final byte[] sendMTFValues2_pos = new byte[N_GROUPS]; // 6 byte
- final boolean[] sentMTFValues4_inUse16 = new boolean[16]; // 16 byte
-
- final int[] heap = new int[MAX_ALPHA_SIZE + 2]; // 1040 byte
- final int[] weight = new int[MAX_ALPHA_SIZE * 2]; // 2064 byte
- final int[] parent = new int[MAX_ALPHA_SIZE * 2]; // 2064 byte
-
- // ------------
- // 333408 byte
-
- /* holds the RLEd block of original data starting at index 1.
- * After sorting the last byte added to the buffer is at index
- * 0. */
- final byte[] block; // 900021 byte
- /* maps index in Burrows-Wheeler transformed block => index of
- * byte in original block */
- final int[] fmap; // 3600000 byte
- final char[] sfmap; // 3600000 byte
- // ------------
- // 8433529 byte
- // ============
-
- /**
- * Index of original line in Burrows-Wheeler table.
- *
- *
- * This is the index in fmap that points to the last byte
- * of the original data.
- */
- int origPtr;
-
- Data(final int blockSize100k) {
- final int n = blockSize100k * BZip2Constants.BASEBLOCKSIZE;
- this.block = new byte[(n + 1 + NUM_OVERSHOOT_BYTES)];
- this.fmap = new int[n];
- this.sfmap = new char[2 * n];
- }
-
- }
-
-}
diff --git a/src/org/apache/commons/compress/compressors/bzip2/BZip2Constants.java b/src/org/apache/commons/compress/compressors/bzip2/BZip2Constants.java
deleted file mode 100644
index 9a8b9c4c0c9..00000000000
--- a/src/org/apache/commons/compress/compressors/bzip2/BZip2Constants.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.bzip2;
-
-/**
- * Constants for both the compress and decompress BZip2 classes.
- */
-interface BZip2Constants {
-
- int BASEBLOCKSIZE = 100000;
- int MAX_ALPHA_SIZE = 258;
- int MAX_CODE_LEN = 23;
- int RUNA = 0;
- int RUNB = 1;
- int N_GROUPS = 6;
- int G_SIZE = 50;
- int N_ITERS = 4;
- int MAX_SELECTORS = (2 + (900000 / G_SIZE));
- int NUM_OVERSHOOT_BYTES = 20;
-
-}
\ No newline at end of file
diff --git a/src/org/apache/commons/compress/compressors/bzip2/BZip2Utils.java b/src/org/apache/commons/compress/compressors/bzip2/BZip2Utils.java
deleted file mode 100644
index 5582d981a0d..00000000000
--- a/src/org/apache/commons/compress/compressors/bzip2/BZip2Utils.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.bzip2;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-import org.apache.commons.compress.compressors.FileNameUtil;
-
-/**
- * Utility code for the BZip2 compression format.
- * @ThreadSafe
- * @since 1.1
- */
-public abstract class BZip2Utils {
-
- private static final FileNameUtil fileNameUtil;
-
- static {
- final Map<String, String> uncompressSuffix =
- new LinkedHashMap<>();
- // backwards compatibility: BZip2Utils never created the short
- // tbz form, so .tar.bz2 has to be added explicitly
- uncompressSuffix.put(".tar.bz2", ".tar");
- uncompressSuffix.put(".tbz2", ".tar");
- uncompressSuffix.put(".tbz", ".tar");
- uncompressSuffix.put(".bz2", "");
- uncompressSuffix.put(".bz", "");
- fileNameUtil = new FileNameUtil(uncompressSuffix, ".bz2");
- }
-
- /** Private constructor to prevent instantiation of this utility class. */
- private BZip2Utils() {
- }
-
- /**
- * Detects common bzip2 suffixes in the given filename.
- *
- * @param filename name of a file
- * @return {@code true} if the filename has a common bzip2 suffix,
- * {@code false} otherwise
- */
- public static boolean isCompressedFilename(final String filename) {
- return fileNameUtil.isCompressedFilename(filename);
- }
-
- /**
- * Maps the given name of a bzip2-compressed file to the name that the
- * file should have after uncompression. Commonly used file type specific
- * suffixes like ".tbz" or ".tbz2" are automatically detected and
- * correctly mapped. For example the name "package.tbz2" is mapped to
- * "package.tar". And any filenames with the generic ".bz2" suffix
- * (or any other generic bzip2 suffix) is mapped to a name without that
- * suffix. If no bzip2 suffix is detected, then the filename is returned
- * unmapped.
- *
- * @param filename name of a file
- * @return name of the corresponding uncompressed file
- */
- public static String getUncompressedFilename(final String filename) {
- return fileNameUtil.getUncompressedFilename(filename);
- }
-
- /**
- * Maps the given filename to the name that the file should have after
- * compression with bzip2. Currently this method simply appends the suffix
- * ".bz2" to the filename based on the standard behaviour of the "bzip2"
- * program, but a future version may implement a more complex mapping if
- * a new widely used naming pattern emerges.
- *
- * @param filename name of a file
- * @return name of the corresponding compressed file
- */
- public static String getCompressedFilename(final String filename) {
- return fileNameUtil.getCompressedFilename(filename);
- }
-
-}
diff --git a/src/org/apache/commons/compress/compressors/bzip2/BlockSort.java b/src/org/apache/commons/compress/compressors/bzip2/BlockSort.java
deleted file mode 100644
index 69819e3da68..00000000000
--- a/src/org/apache/commons/compress/compressors/bzip2/BlockSort.java
+++ /dev/null
@@ -1,1082 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.bzip2;
-
-import java.util.BitSet;
-
-/**
- * Encapsulates the Burrows-Wheeler sorting algorithm needed by {@link
- * BZip2CompressorOutputStream}.
- *
- *
- * This class is based on a Java port of Julian Seward's
- * blocksort.c in his libbzip2
- *
- *
- * The Burrows-Wheeler transform is a reversible transform of the
- * original data that is supposed to group similar bytes close to
- * each other. The idea is to sort all permutations of the input and
- * only keep the last byte of each permutation. E.g. for "Commons
- * Compress" you'd get:
- *
- * <pre>
- *  CompressCommons
- * Commons Compress
- * CompressCommons 
- * essCommons Compr
- * mmons CompressCo
- * mons CompressCom
- * mpressCommons Co
- * ns CompressCommo
- * ommons CompressC
- * ompressCommons C
- * ons CompressComm
- * pressCommons Com
- * ressCommons Comp
- * s CompressCommon
- * sCommons Compres
- * ssCommons Compre
- * </pre>
- *
- * Which results in a new text "ss romooCCmmpnse", in addition the
- * index of the first line that contained the original text is kept -
- * in this case it is 1. The idea is that in a long English text all
- * permutations that start with "he" are likely suffixes of a "the" and
- * thus they end in "t" leading to a larger block of "t"s that can
- * better be compressed by the subsequent Move-to-Front, run-length
- * and Huffman encoding steps.
- *
- * @NotThreadSafe
- */
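As a quick cross-check of the example above: sorting all rotations of the input and keeping the last column plus the row index of the original text reproduces the transform by brute force. A toy sketch (the real BlockSort never materialises the rotations; class name hypothetical):

```java
import java.util.Arrays;

// Toy Burrows-Wheeler transform by explicit rotation sorting; the real
// BlockSort never materialises the rotations (illustration only).
final class BwtSketch {
    public static void main(final String[] args) {
        final String s = "Commons Compress";
        final int n = s.length();
        final String[] rotations = new String[n];
        for (int i = 0; i < n; i++) {
            rotations[i] = s.substring(i) + s.substring(0, i);
        }
        Arrays.sort(rotations);
        final StringBuilder lastColumn = new StringBuilder(n);
        int origPtr = -1;
        for (int i = 0; i < n; i++) {
            lastColumn.append(rotations[i].charAt(n - 1));
            if (rotations[i].equals(s)) {
                origPtr = i; // row that holds the original text
            }
        }
        System.out.println(lastColumn); // ss romooCCmmpnse
        System.out.println(origPtr);    // 1
    }
}
```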
-class BlockSort {
-
- /*
- * Some of the constructs used in the C code cannot be ported
- * literally to Java - for example macros, unsigned types. Some
- * code has been hand-tuned to improve performance. In order to
- * avoid memory pressure some structures are reused for several
- * blocks and some memory is even shared between sorting and the
- * MTF stage even though either algorithm uses it for its own
- * purpose.
- *
- * Comments preserved from the actual C code are prefixed with
- * "LBZ2:".
- */
-
- /*
- * 2012-05-20 Stefan Bodewig:
- *
- * This class seems to mix several revisions of libbzip2's code.
- * The mainSort function and those used by it look closer to the
- * 0.9.5 version but show some variations introduced later. At
- * the same time the logic of Compress 1.4 to randomize the block
- * on bad input has been dropped after libbzip2 0.9.0 and replaced
- * by a fallback sorting algorithm.
- *
- * I've added the fallbackSort function of 1.0.6 and tried to
- * integrate it with the existing code without touching too much.
- * I've also removed the now unused randomization code.
- */
-
- /*
- * LBZ2: If you are ever unlucky/improbable enough to get a stack
- * overflow whilst sorting, increase the following constant and
- * try again. In practice I have never seen the stack go above 27
- * elems, so the following limit seems very generous.
- */
- private static final int QSORT_STACK_SIZE = 1000;
-
- private static final int FALLBACK_QSORT_STACK_SIZE = 100;
-
- private static final int STACK_SIZE =
- QSORT_STACK_SIZE < FALLBACK_QSORT_STACK_SIZE
- ? FALLBACK_QSORT_STACK_SIZE : QSORT_STACK_SIZE;
-
- /*
- * Used when sorting. If too many long comparisons happen, we stop sorting,
- * and use fallbackSort instead.
- */
- private int workDone;
- private int workLimit;
- private boolean firstAttempt;
-
- private final int[] stack_ll = new int[STACK_SIZE]; // 4000 byte
- private final int[] stack_hh = new int[STACK_SIZE]; // 4000 byte
- private final int[] stack_dd = new int[QSORT_STACK_SIZE]; // 4000 byte
-
- private final int[] mainSort_runningOrder = new int[256]; // 1024 byte
- private final int[] mainSort_copy = new int[256]; // 1024 byte
- private final boolean[] mainSort_bigDone = new boolean[256]; // 256 byte
-
- private final int[] ftab = new int[65537]; // 262148 byte
-
- /**
- * Array instance identical to Data's sfmap, both are used only
- * temporarily and independently, so we do not need to allocate
- * additional memory.
- */
- private final char[] quadrant;
-
- BlockSort(final BZip2CompressorOutputStream.Data data) {
- this.quadrant = data.sfmap;
- }
-
- void blockSort(final BZip2CompressorOutputStream.Data data, final int last) {
- this.workLimit = WORK_FACTOR * last;
- this.workDone = 0;
- this.firstAttempt = true;
-
- if (last + 1 < 10000) {
- fallbackSort(data, last);
- } else {
- mainSort(data, last);
-
- if (this.firstAttempt && (this.workDone > this.workLimit)) {
- fallbackSort(data, last);
- }
- }
-
- final int[] fmap = data.fmap;
- data.origPtr = -1;
- for (int i = 0; i <= last; i++) {
- if (fmap[i] == 0) {
- data.origPtr = i;
- break;
- }
- }
-
- // assert (data.origPtr != -1) : data.origPtr;
- }
-
- /**
- * Adapt fallbackSort to the expected interface of the rest of the
- * code, in particular deal with the fact that block starts at
- * offset 1 (in libbzip2 1.0.6 it starts at 0).
- */
- final void fallbackSort(final BZip2CompressorOutputStream.Data data,
- final int last) {
- data.block[0] = data.block[last + 1];
- fallbackSort(data.fmap, data.block, last + 1);
- for (int i = 0; i < last + 1; i++) {
- --data.fmap[i];
- }
- for (int i = 0; i < last + 1; i++) {
- if (data.fmap[i] == -1) {
- data.fmap[i] = last;
- break;
- }
- }
- }
-
-/*---------------------------------------------*/
-
-/*---------------------------------------------*/
-/*--- LBZ2: Fallback O(N log(N)^2) sorting ---*/
-/*--- algorithm, for repetitive blocks ---*/
-/*---------------------------------------------*/
-
- /*
- * This is the fallback sorting algorithm libbzip2 1.0.6 uses for
- * repetitive or very short inputs.
- *
- * The idea is inspired by Manber-Myers string suffix sorting
- * algorithm. First a bucket sort places each permutation of the
- * block into a bucket based on its first byte. Permutations are
- * represented by pointers to their first character kept in
- * (partially) sorted order inside the array ftab.
- *
- * The next step visits all buckets in order and performs a
- * quicksort on all permutations of the bucket based on the index
- * of the bucket the second byte of the permutation belongs to,
- * thereby forming new buckets. When arrived here the
- * permutations are sorted up to the second character and we have
- * buckets of permutations that are identical up to two
- * characters.
- *
- * Repeat the step of quicksorting each bucket, now based on the
- * bucket holding the sequence of the third and fourth character
- * leading to four byte buckets. Repeat this doubling of bucket
- * sizes until all buckets only contain single permutations or the
- * bucket size exceeds the block size.
- *
- * I.e.
- *
- * "abraba" form three buckets for the chars "a", "b", and "r" in
- * the first step with
- *
- * fmap = { 'a:' 5, 3, 0, 'b:' 4, 1, 'r:' 2 }
- *
- * when looking at the bucket of "a"s the second characters are in
- * the buckets that start with fmap-index 0 (rolled over), 3 and 3
- * respectively, forming two new buckets "aa" and "ab", so we get
- *
- * fmap = { 'aa:' 5, 'ab:' 3, 0, 'ba:' 4, 'br': 1, 'ra:' 2 }
- *
- * since the last bucket only contained a single item it didn't
- * have to be sorted at all.
- *
- * There now is just one bucket with more than one permutation
- * that remains to be sorted. For the permutation that starts
- * with index 3 the third and fourth char are in bucket 'aa' at
- * index 0 and for the one starting at block index 0 they are in
- * bucket 'ra' with sort index 5. The fully sorted order then becomes:
- *
- * fmap = { 5, 3, 0, 4, 1, 2 }
- *
- */
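The final fmap of the "abraba" walkthrough can be verified by brute force, sorting the rotation start indices directly (the fallback sort instead refines buckets to avoid the quadratic string comparisons; class name hypothetical):

```java
import java.util.Arrays;
import java.util.Comparator;
import java.util.stream.IntStream;

// Brute-force check of the "abraba" walkthrough: sort rotation start indices
// by their rotations (illustration only, not how fallbackSort works).
final class FallbackExampleSketch {
    public static void main(final String[] args) {
        final String s = "abraba";
        final int n = s.length();
        final int[] fmap = IntStream.range(0, n).boxed()
                .sorted(Comparator.comparing((Integer i) -> s.substring(i) + s.substring(0, i)))
                .mapToInt(Integer::intValue)
                .toArray();
        System.out.println(Arrays.toString(fmap)); // [5, 3, 0, 4, 1, 2]
    }
}
```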
-
- /**
- * @param fmap points to the index of the starting point of a
- * permutation inside the block of data in the current
- * partially sorted order
- * @param eclass points from the index of a character inside the
- * block to the first index in fmap that contains the
- * bucket of its suffix that is sorted in this step.
- * @param lo lower boundary of the fmap-interval to be sorted
- * @param hi upper boundary of the fmap-interval to be sorted
- */
- private void fallbackSimpleSort(final int[] fmap,
- final int[] eclass,
- final int lo,
- final int hi) {
- if (lo == hi) {
- return;
- }
-
- int j;
- if (hi - lo > 3) {
- for (int i = hi - 4; i >= lo; i--) {
- final int tmp = fmap[i];
- final int ec_tmp = eclass[tmp];
- for (j = i + 4; j <= hi && ec_tmp > eclass[fmap[j]];
- j += 4) {
- fmap[j - 4] = fmap[j];
- }
- fmap[j - 4] = tmp;
- }
- }
-
- for (int i = hi - 1; i >= lo; i--) {
- final int tmp = fmap[i];
- final int ec_tmp = eclass[tmp];
- for (j = i + 1; j <= hi && ec_tmp > eclass[fmap[j]]; j++) {
- fmap[j - 1] = fmap[j];
- }
- fmap[j-1] = tmp;
- }
- }
-
- private static final int FALLBACK_QSORT_SMALL_THRESH = 10;
-
- /**
- * swaps two values in fmap
- */
- private void fswap(final int[] fmap, final int zz1, final int zz2) {
- final int zztmp = fmap[zz1];
- fmap[zz1] = fmap[zz2];
- fmap[zz2] = zztmp;
- }
-
- /**
- * swaps two intervals starting at yyp1 and yyp2 of length yyn inside fmap.
- */
- private void fvswap(final int[] fmap, int yyp1, int yyp2, int yyn) {
- while (yyn > 0) {
- fswap(fmap, yyp1, yyp2);
- yyp1++; yyp2++; yyn--;
- }
- }
-
- private int fmin(final int a, final int b) {
- return a < b ? a : b;
- }
-
- private void fpush(final int sp, final int lz, final int hz) {
- stack_ll[sp] = lz;
- stack_hh[sp] = hz;
- }
-
- private int[] fpop(final int sp) {
- return new int[] { stack_ll[sp], stack_hh[sp] };
- }
-
- /**
- * @param fmap points to the index of the starting point of a
- * permutation inside the block of data in the current
- * partially sorted order
- * @param eclass points from the index of a character inside the
- * block to the first index in fmap that contains the
- * bucket of its suffix that is sorted in this step.
- * @param loSt lower boundary of the fmap-interval to be sorted
- * @param hiSt upper boundary of the fmap-interval to be sorted
- */
- private void fallbackQSort3(final int[] fmap,
- final int[] eclass,
- final int loSt,
- final int hiSt) {
- int lo, unLo, ltLo, hi, unHi, gtHi, n;
-
- long r = 0;
- int sp = 0;
- fpush(sp++, loSt, hiSt);
-
- while (sp > 0) {
- final int[] s = fpop(--sp);
- lo = s[0]; hi = s[1];
-
- if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) {
- fallbackSimpleSort(fmap, eclass, lo, hi);
- continue;
- }
-
- /* LBZ2: Random partitioning. Median of 3 sometimes fails to
- avoid bad cases. Median of 9 seems to help but
- looks rather expensive. This too seems to work but
- is cheaper. Guidance for the magic constants
- 7621 and 32768 is taken from Sedgewick's algorithms
- book, chapter 35.
- */
- r = ((r * 7621) + 1) % 32768;
- final long r3 = r % 3;
- long med;
- if (r3 == 0) {
- med = eclass[fmap[lo]];
- } else if (r3 == 1) {
- med = eclass[fmap[(lo + hi) >>> 1]];
- } else {
- med = eclass[fmap[hi]];
- }
-
- unLo = ltLo = lo;
- unHi = gtHi = hi;
-
- // looks like the ternary partition attributed to Wegner
- // in the cited Sedgewick paper
- while (true) {
- while (true) {
- if (unLo > unHi) {
- break;
- }
- n = eclass[fmap[unLo]] - (int) med;
- if (n == 0) {
- fswap(fmap, unLo, ltLo);
- ltLo++; unLo++;
- continue;
- }
- if (n > 0) {
- break;
- }
- unLo++;
- }
- while (true) {
- if (unLo > unHi) {
- break;
- }
- n = eclass[fmap[unHi]] - (int) med;
- if (n == 0) {
- fswap(fmap, unHi, gtHi);
- gtHi--; unHi--;
- continue;
- }
- if (n < 0) {
- break;
- }
- unHi--;
- }
- if (unLo > unHi) {
- break;
- }
- fswap(fmap, unLo, unHi); unLo++; unHi--;
- }
-
- if (gtHi < ltLo) {
- continue;
- }
-
- n = fmin(ltLo - lo, unLo - ltLo);
- fvswap(fmap, lo, unLo - n, n);
- int m = fmin(hi - gtHi, gtHi - unHi);
- fvswap(fmap, unHi + 1, hi - m + 1, m);
-
- n = lo + unLo - ltLo - 1;
- m = hi - (gtHi - unHi) + 1;
-
- if (n - lo > hi - m) {
- fpush(sp++, lo, n);
- fpush(sp++, m, hi);
- } else {
- fpush(sp++, m, hi);
- fpush(sp++, lo, n);
- }
- }
- }
-
-
-/*---------------------------------------------*/
-
- private int[] eclass;
-
- private int[] getEclass() {
- if (eclass == null) {
- eclass = new int[quadrant.length / 2];
- }
- return eclass;
- }
-
- /*
- * The C code uses an array of ints (each int holding 32 flags) to
- * represent the bucket-start flags (bhtab). It also contains
- * optimizations to skip over 32 consecutively set or
- * consecutively unset bits on word boundaries at once. For now
- * I've chosen to use the simpler but potentially slower code
- * using BitSet - also in the hope that using the BitSet#nextXXX
- * methods may be fast enough.
- */
-
- /**
- * @param fmap points to the index of the starting point of a
- * permutation inside the block of data in the current
- * partially sorted order
- * @param block the original data
- * @param nblock size of the block
- */
- final void fallbackSort(final int[] fmap, final byte[] block, final int nblock) {
- final int[] ftab = new int[257];
- int H, i, j, k, l, r, cc, cc1;
- int nNotDone;
- int nBhtab;
- final int[] eclass = getEclass();
-
- for (i = 0; i < nblock; i++) {
- eclass[i] = 0;
- }
- /*--
- LBZ2: Initial 1-char radix sort to generate
- initial fmap and initial BH bits.
- --*/
- for (i = 0; i < nblock; i++) {
- ftab[block[i] & 0xff]++;
- }
- for (i = 1; i < 257; i++) {
- ftab[i] += ftab[i - 1];
- }
-
- for (i = 0; i < nblock; i++) {
- j = block[i] & 0xff;
- k = ftab[j] - 1;
- ftab[j] = k;
- fmap[k] = i;
- }
-
- nBhtab = 64 + nblock;
- final BitSet bhtab = new BitSet(nBhtab);
- for (i = 0; i < 256; i++) {
- bhtab.set(ftab[i]);
- }
-
- /*--
- LBZ2: Inductively refine the buckets. Kind-of an
- "exponential radix sort" (!), inspired by the
- Manber-Myers suffix array construction algorithm.
- --*/
-
- /*-- LBZ2: set sentinel bits for block-end detection --*/
- for (i = 0; i < 32; i++) {
- bhtab.set(nblock + 2 * i);
- bhtab.clear(nblock + 2 * i + 1);
- }
-
- /*-- LBZ2: the log(N) loop --*/
- H = 1;
- while (true) {
-
- j = 0;
- for (i = 0; i < nblock; i++) {
- if (bhtab.get(i)) {
- j = i;
- }
- k = fmap[i] - H;
- if (k < 0) {
- k += nblock;
- }
- eclass[k] = j;
- }
-
- nNotDone = 0;
- r = -1;
- while (true) {
-
- /*-- LBZ2: find the next non-singleton bucket --*/
- k = r + 1;
- k = bhtab.nextClearBit(k);
- l = k - 1;
- if (l >= nblock) {
- break;
- }
- k = bhtab.nextSetBit(k + 1);
- r = k - 1;
- if (r >= nblock) {
- break;
- }
-
- /*-- LBZ2: now [l, r] bracket current bucket --*/
- if (r > l) {
- nNotDone += (r - l + 1);
- fallbackQSort3(fmap, eclass, l, r);
-
- /*-- LBZ2: scan bucket and generate header bits-- */
- cc = -1;
- for (i = l; i <= r; i++) {
- cc1 = eclass[fmap[i]];
- if (cc != cc1) {
- bhtab.set(i);
- cc = cc1;
- }
- }
- }
- }
-
- H *= 2;
- if (H > nblock || nNotDone == 0) {
- break;
- }
- }
- }
-
-/*---------------------------------------------*/
-
- /*
- * LBZ2: Knuth's increments seem to work better than Incerpi-Sedgewick here.
- * Possibly because the number of elems to sort is usually small, typically
- * <= 20.
- */
- private static final int[] INCS = { 1, 4, 13, 40, 121, 364, 1093, 3280,
- 9841, 29524, 88573, 265720, 797161,
- 2391484 };
-
- /**
- * This is the most hammered method of this class.
- *
- *
- * This is the version using unrolled loops. Normally I never use such ones
- * in Java code. The unrolling has shown a noticeable performance improvement
- * on JRE 1.4.2 (Linux i586 / HotSpot Client). Of course it depends on the
- * JIT compiler of the vm.
- *
- */
- private boolean mainSimpleSort(final BZip2CompressorOutputStream.Data dataShadow,
- final int lo, final int hi, final int d,
- final int lastShadow) {
- final int bigN = hi - lo + 1;
- if (bigN < 2) {
- return this.firstAttempt && (this.workDone > this.workLimit);
- }
-
- int hp = 0;
- while (INCS[hp] < bigN) {
- hp++;
- }
-
- final int[] fmap = dataShadow.fmap;
- final char[] quadrant = this.quadrant;
- final byte[] block = dataShadow.block;
- final int lastPlus1 = lastShadow + 1;
- final boolean firstAttemptShadow = this.firstAttempt;
- final int workLimitShadow = this.workLimit;
- int workDoneShadow = this.workDone;
-
- // Following block contains unrolled code which could be shortened by
- // coding it in additional loops.
-
- HP: while (--hp >= 0) {
- final int h = INCS[hp];
- final int mj = lo + h - 1;
-
- for (int i = lo + h; i <= hi;) {
- // copy
- for (int k = 3; (i <= hi) && (--k >= 0); i++) {
- final int v = fmap[i];
- final int vd = v + d;
- int j = i;
-
- // for (int a;
- // (j > mj) && mainGtU((a = fmap[j - h]) + d, vd,
- // block, quadrant, lastShadow);
- // j -= h) {
- // fmap[j] = a;
- // }
- //
- // unrolled version:
-
- // start inline mainGTU
- boolean onceRunned = false;
- int a = 0;
-
- HAMMER: while (true) {
- if (onceRunned) {
- fmap[j] = a;
- if ((j -= h) <= mj) { //NOSONAR
- break HAMMER;
- }
- } else {
- onceRunned = true;
- }
-
- a = fmap[j - h];
- int i1 = a + d;
- int i2 = vd;
-
- // following could be done in a loop, but
- // unrolled it for performance:
- if (block[i1 + 1] == block[i2 + 1]) {
- if (block[i1 + 2] == block[i2 + 2]) {
- if (block[i1 + 3] == block[i2 + 3]) {
- if (block[i1 + 4] == block[i2 + 4]) {
- if (block[i1 + 5] == block[i2 + 5]) {
- if (block[(i1 += 6)] == block[(i2 += 6)]) { //NOSONAR
- int x = lastShadow;
- X: while (x > 0) {
- x -= 4;
-
- if (block[i1 + 1] == block[i2 + 1]) {
- if (quadrant[i1] == quadrant[i2]) {
- if (block[i1 + 2] == block[i2 + 2]) {
- if (quadrant[i1 + 1] == quadrant[i2 + 1]) {
- if (block[i1 + 3] == block[i2 + 3]) {
- if (quadrant[i1 + 2] == quadrant[i2 + 2]) {
- if (block[i1 + 4] == block[i2 + 4]) {
- if (quadrant[i1 + 3] == quadrant[i2 + 3]) {
- if ((i1 += 4) >= lastPlus1) { //NOSONAR
- i1 -= lastPlus1;
- }
- if ((i2 += 4) >= lastPlus1) { //NOSONAR
- i2 -= lastPlus1;
- }
- workDoneShadow++;
- continue X;
- } else if ((quadrant[i1 + 3] > quadrant[i2 + 3])) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((block[i1 + 4] & 0xff) > (block[i2 + 4] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((quadrant[i1 + 2] > quadrant[i2 + 2])) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((block[i1 + 3] & 0xff) > (block[i2 + 3] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((quadrant[i1 + 1] > quadrant[i2 + 1])) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((block[i1 + 2] & 0xff) > (block[i2 + 2] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((quadrant[i1] > quadrant[i2])) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((block[i1 + 1] & 0xff) > (block[i2 + 1] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
-
- }
- break HAMMER;
- } // while x > 0
- if ((block[i1] & 0xff) > (block[i2] & 0xff)) {
- continue HAMMER;
- }
- break HAMMER;
- } else if ((block[i1 + 5] & 0xff) > (block[i2 + 5] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((block[i1 + 4] & 0xff) > (block[i2 + 4] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((block[i1 + 3] & 0xff) > (block[i2 + 3] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((block[i1 + 2] & 0xff) > (block[i2 + 2] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
- } else if ((block[i1 + 1] & 0xff) > (block[i2 + 1] & 0xff)) {
- continue HAMMER;
- } else {
- break HAMMER;
- }
-
- } // HAMMER
- // end inline mainGTU
-
- fmap[j] = v;
- }
-
- if (firstAttemptShadow && (i <= hi)
- && (workDoneShadow > workLimitShadow)) {
- break HP;
- }
- }
- }
-
- this.workDone = workDoneShadow;
- return firstAttemptShadow && (workDoneShadow > workLimitShadow);
- }
-
-/*--
- LBZ2: The following is an implementation of
- an elegant 3-way quicksort for strings,
- described in a paper "Fast Algorithms for
- Sorting and Searching Strings", by Robert
- Sedgewick and Jon L. Bentley.
---*/
-
- private static void vswap(final int[] fmap, int p1, int p2, int n) {
- n += p1;
- while (p1 < n) {
- final int t = fmap[p1];
- fmap[p1++] = fmap[p2];
- fmap[p2++] = t;
- }
- }
-
- private static byte med3(final byte a, final byte b, final byte c) {
- return (a < b) ? (b < c ? b : a < c ? c : a) : (b > c ? b : a > c ? c
- : a);
- }
-
- private static final int SMALL_THRESH = 20;
- private static final int DEPTH_THRESH = 10;
- private static final int WORK_FACTOR = 30;
-
- /**
- * Method "mainQSort3", file "blocksort.c", BZip2 1.0.2
- */
- private void mainQSort3(final BZip2CompressorOutputStream.Data dataShadow,
- final int loSt, final int hiSt, final int dSt,
- final int last) {
- final int[] stack_ll = this.stack_ll;
- final int[] stack_hh = this.stack_hh;
- final int[] stack_dd = this.stack_dd;
- final int[] fmap = dataShadow.fmap;
- final byte[] block = dataShadow.block;
-
- stack_ll[0] = loSt;
- stack_hh[0] = hiSt;
- stack_dd[0] = dSt;
-
- for (int sp = 1; --sp >= 0;) {
- final int lo = stack_ll[sp];
- final int hi = stack_hh[sp];
- final int d = stack_dd[sp];
-
- if ((hi - lo < SMALL_THRESH) || (d > DEPTH_THRESH)) {
- if (mainSimpleSort(dataShadow, lo, hi, d, last)) {
- return;
- }
- } else {
- final int d1 = d + 1;
- final int med = med3(block[fmap[lo] + d1],
- block[fmap[hi] + d1], block[fmap[(lo + hi) >>> 1] + d1]) & 0xff;
-
- int unLo = lo;
- int unHi = hi;
- int ltLo = lo;
- int gtHi = hi;
-
- while (true) {
- while (unLo <= unHi) {
- final int n = (block[fmap[unLo] + d1] & 0xff)
- - med;
- if (n == 0) {
- final int temp = fmap[unLo];
- fmap[unLo++] = fmap[ltLo];
- fmap[ltLo++] = temp;
- } else if (n < 0) {
- unLo++;
- } else {
- break;
- }
- }
-
- while (unLo <= unHi) {
- final int n = (block[fmap[unHi] + d1] & 0xff)
- - med;
- if (n == 0) {
- final int temp = fmap[unHi];
- fmap[unHi--] = fmap[gtHi];
- fmap[gtHi--] = temp;
- } else if (n > 0) {
- unHi--;
- } else {
- break;
- }
- }
-
- if (unLo <= unHi) {
- final int temp = fmap[unLo];
- fmap[unLo++] = fmap[unHi];
- fmap[unHi--] = temp;
- } else {
- break;
- }
- }
-
- if (gtHi < ltLo) {
- stack_ll[sp] = lo;
- stack_hh[sp] = hi;
- stack_dd[sp] = d1;
- sp++;
- } else {
- int n = ((ltLo - lo) < (unLo - ltLo)) ? (ltLo - lo)
- : (unLo - ltLo);
- vswap(fmap, lo, unLo - n, n);
- int m = ((hi - gtHi) < (gtHi - unHi)) ? (hi - gtHi)
- : (gtHi - unHi);
- vswap(fmap, unLo, hi - m + 1, m);
-
- n = lo + unLo - ltLo - 1;
- m = hi - (gtHi - unHi) + 1;
-
- stack_ll[sp] = lo;
- stack_hh[sp] = n;
- stack_dd[sp] = d;
- sp++;
-
- stack_ll[sp] = n + 1;
- stack_hh[sp] = m - 1;
- stack_dd[sp] = d1;
- sp++;
-
- stack_ll[sp] = m;
- stack_hh[sp] = hi;
- stack_dd[sp] = d;
- sp++;
- }
- }
- }
- }
-
- private static final int SETMASK = (1 << 21);
- private static final int CLEARMASK = (~SETMASK);
-
- final void mainSort(final BZip2CompressorOutputStream.Data dataShadow,
- final int lastShadow) {
- final int[] runningOrder = this.mainSort_runningOrder;
- final int[] copy = this.mainSort_copy;
- final boolean[] bigDone = this.mainSort_bigDone;
- final int[] ftab = this.ftab;
- final byte[] block = dataShadow.block;
- final int[] fmap = dataShadow.fmap;
- final char[] quadrant = this.quadrant;
- final int workLimitShadow = this.workLimit;
- final boolean firstAttemptShadow = this.firstAttempt;
-
- // LBZ2: Set up the 2-byte frequency table
- for (int i = 65537; --i >= 0;) {
- ftab[i] = 0;
- }
-
- /*
- * In the various block-sized structures, live data runs from 0 to
- * last+NUM_OVERSHOOT_BYTES inclusive. First, set up the overshoot area
- * for block.
- */
- for (int i = 0; i < BZip2Constants.NUM_OVERSHOOT_BYTES; i++) {
- block[lastShadow + i + 2] = block[(i % (lastShadow + 1)) + 1];
- }
- for (int i = lastShadow + BZip2Constants.NUM_OVERSHOOT_BYTES +1; --i >= 0;) {
- quadrant[i] = 0;
- }
- block[0] = block[lastShadow + 1];
-
- // LBZ2: Complete the initial radix sort:
-
- int c1 = block[0] & 0xff;
- for (int i = 0; i <= lastShadow; i++) {
- final int c2 = block[i + 1] & 0xff;
- ftab[(c1 << 8) + c2]++;
- c1 = c2;
- }
-
- for (int i = 1; i <= 65536; i++) {
- ftab[i] += ftab[i - 1];
- }
-
- c1 = block[1] & 0xff;
- for (int i = 0; i < lastShadow; i++) {
- final int c2 = block[i + 2] & 0xff;
- fmap[--ftab[(c1 << 8) + c2]] = i;
- c1 = c2;
- }
-
- fmap[--ftab[((block[lastShadow + 1] & 0xff) << 8) + (block[1] & 0xff)]] = lastShadow;
-
- /*
- * LBZ2: Now ftab contains the first loc of every small bucket. Calculate the
- * running order, from smallest to largest big bucket.
- */
- for (int i = 256; --i >= 0;) {
- bigDone[i] = false;
- runningOrder[i] = i;
- }
-
- // h = 364, 121, 40, 13, 4, 1
- for (int h = 364; h != 1;) { //NOSONAR
- h /= 3;
- for (int i = h; i <= 255; i++) {
- final int vv = runningOrder[i];
- final int a = ftab[(vv + 1) << 8] - ftab[vv << 8];
- final int b = h - 1;
- int j = i;
- for (int ro = runningOrder[j - h]; (ftab[(ro + 1) << 8] - ftab[ro << 8]) > a; ro = runningOrder[j
- - h]) {
- runningOrder[j] = ro;
- j -= h;
- if (j <= b) {
- break;
- }
- }
- runningOrder[j] = vv;
- }
- }
-
- /*
- * LBZ2: The main sorting loop.
- */
- for (int i = 0; i <= 255; i++) {
- /*
- * LBZ2: Process big buckets, starting with the least full.
- */
- final int ss = runningOrder[i];
-
- // Step 1:
- /*
- * LBZ2: Complete the big bucket [ss] by quicksorting any unsorted small
- * buckets [ss, j]. Hopefully previous pointer-scanning phases have
- * already completed many of the small buckets [ss, j], so we don't
- * have to sort them at all.
- */
- for (int j = 0; j <= 255; j++) {
- final int sb = (ss << 8) + j;
- final int ftab_sb = ftab[sb];
- if ((ftab_sb & SETMASK) != SETMASK) {
- final int lo = ftab_sb & CLEARMASK;
- final int hi = (ftab[sb + 1] & CLEARMASK) - 1;
- if (hi > lo) {
- mainQSort3(dataShadow, lo, hi, 2, lastShadow);
- if (firstAttemptShadow
- && (this.workDone > workLimitShadow)) {
- return;
- }
- }
- ftab[sb] = ftab_sb | SETMASK;
- }
- }
-
- // Step 2:
- // LBZ2: Now scan this big bucket so as to synthesise the
- // sorted order for small buckets [t, ss] for all t != ss.
-
- for (int j = 0; j <= 255; j++) {
- copy[j] = ftab[(j << 8) + ss] & CLEARMASK;
- }
-
- for (int j = ftab[ss << 8] & CLEARMASK, hj = (ftab[(ss + 1) << 8] & CLEARMASK); j < hj; j++) {
- final int fmap_j = fmap[j];
- c1 = block[fmap_j] & 0xff;
- if (!bigDone[c1]) {
- fmap[copy[c1]] = (fmap_j == 0) ? lastShadow : (fmap_j - 1);
- copy[c1]++;
- }
- }
-
- for (int j = 256; --j >= 0;) {
- ftab[(j << 8) + ss] |= SETMASK;
- }
-
- // Step 3:
- /*
- * LBZ2: The ss big bucket is now done. Record this fact, and update the
- * quadrant descriptors. Remember to update quadrants in the
- * overshoot area too, if necessary. The "if (i < 255)" test merely
- * skips this updating for the last bucket processed, since updating
- * for the last bucket is pointless.
- */
- bigDone[ss] = true;
-
- if (i < 255) {
- final int bbStart = ftab[ss << 8] & CLEARMASK;
- final int bbSize = (ftab[(ss + 1) << 8] & CLEARMASK) - bbStart;
- int shifts = 0;
-
- while ((bbSize >> shifts) > 65534) {
- shifts++;
- }
-
- for (int j = 0; j < bbSize; j++) {
- final int a2update = fmap[bbStart + j];
- final char qVal = (char) (j >> shifts);
- quadrant[a2update] = qVal;
- if (a2update < BZip2Constants.NUM_OVERSHOOT_BYTES) {
- quadrant[a2update + lastShadow + 1] = qVal;
- }
- }
- }
-
- }
- }
-
-}
diff --git a/src/org/apache/commons/compress/compressors/bzip2/CRC.java b/src/org/apache/commons/compress/compressors/bzip2/CRC.java
deleted file mode 100644
index a20ea7063d5..00000000000
--- a/src/org/apache/commons/compress/compressors/bzip2/CRC.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.bzip2;
-
-/**
- * A simple class to hold and calculate the CRC for sanity checking of the
- * data.
- * @NotThreadSafe
- */
-class CRC {
- private static final int[] crc32Table = {
- 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9,
- 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
- 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
- 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
- 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9,
- 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
- 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
- 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd,
- 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
- 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5,
- 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81,
- 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
- 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49,
- 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
- 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
- 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d,
- 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
- 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
- 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
- 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca,
- 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
- 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
- 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066,
- 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
- 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e,
- 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692,
- 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
- 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a,
- 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
- 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
- 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686,
- 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
- 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
- 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
- 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f,
- 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
- 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
- 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b,
- 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
- 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623,
- 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7,
- 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
- 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f,
- 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
- 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
- 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b,
- 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
- 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
- 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
- 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c,
- 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
- 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
- 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30,
- 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
- 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088,
- 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654,
- 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
- 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c,
- 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
- 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
- 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0,
- 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
- 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
- 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
- };
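The table holds the 256 precomputed remainders for the unreflected, MSB-first CRC-32 variant bzip2 uses (generator polynomial 0x04C11DB7); updateCRC below then processes one input byte per table lookup. A sketch of how such a table can be derived (illustration only, not part of this patch):

```java
// Derives the table above from the unreflected CRC-32 generator polynomial
// 0x04C11DB7, MSB first - the variant bzip2 uses (illustration only).
final class Crc32TableSketch {
    public static void main(final String[] args) {
        final int[] table = new int[256];
        for (int n = 0; n < 256; n++) {
            int c = n << 24; // the byte enters at the top of the register
            for (int bit = 0; bit < 8; bit++) {
                c = (c & 0x80000000) != 0 ? (c << 1) ^ 0x04C11DB7 : c << 1;
            }
            table[n] = c;
        }
        // Matches the first entries of crc32Table above:
        System.out.printf("0x%08x 0x%08x%n", table[1], table[2]); // 0x04c11db7 0x09823b6e
    }
}
```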
-
- CRC() {
- initialiseCRC();
- }
-
- void initialiseCRC() {
- globalCrc = 0xffffffff;
- }
-
- int getFinalCRC() {
- return ~globalCrc;
- }
-
- int getGlobalCRC() {
- return globalCrc;
- }
-
- void setGlobalCRC(final int newCrc) {
- globalCrc = newCrc;
- }
-
- void updateCRC(final int inCh) {
- int temp = (globalCrc >> 24) ^ inCh;
- if (temp < 0) {
- temp = 256 + temp;
- }
- globalCrc = (globalCrc << 8) ^ CRC.crc32Table[temp];
- }
-
- void updateCRC(final int inCh, int repeat) {
- int globalCrcShadow = this.globalCrc;
- while (repeat-- > 0) {
- final int temp = (globalCrcShadow >> 24) ^ inCh;
- globalCrcShadow = (globalCrcShadow << 8) ^ crc32Table[(temp >= 0)
- ? temp
- : (temp + 256)];
- }
- this.globalCrc = globalCrcShadow;
- }
-
- private int globalCrc;
-}
\ No newline at end of file
diff --git a/src/org/apache/commons/compress/compressors/bzip2/Rand.java b/src/org/apache/commons/compress/compressors/bzip2/Rand.java
deleted file mode 100644
index bb6ef80bae9..00000000000
--- a/src/org/apache/commons/compress/compressors/bzip2/Rand.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.bzip2;
-
-/**
- * Random numbers for both the compress and decompress BZip2 classes.
- */
-final class Rand {
-
- private static final int[] RNUMS = {
- 619, 720, 127, 481, 931, 816, 813, 233, 566, 247,
- 985, 724, 205, 454, 863, 491, 741, 242, 949, 214,
- 733, 859, 335, 708, 621, 574, 73, 654, 730, 472,
- 419, 436, 278, 496, 867, 210, 399, 680, 480, 51,
- 878, 465, 811, 169, 869, 675, 611, 697, 867, 561,
- 862, 687, 507, 283, 482, 129, 807, 591, 733, 623,
- 150, 238, 59, 379, 684, 877, 625, 169, 643, 105,
- 170, 607, 520, 932, 727, 476, 693, 425, 174, 647,
- 73, 122, 335, 530, 442, 853, 695, 249, 445, 515,
- 909, 545, 703, 919, 874, 474, 882, 500, 594, 612,
- 641, 801, 220, 162, 819, 984, 589, 513, 495, 799,
- 161, 604, 958, 533, 221, 400, 386, 867, 600, 782,
- 382, 596, 414, 171, 516, 375, 682, 485, 911, 276,
- 98, 553, 163, 354, 666, 933, 424, 341, 533, 870,
- 227, 730, 475, 186, 263, 647, 537, 686, 600, 224,
- 469, 68, 770, 919, 190, 373, 294, 822, 808, 206,
- 184, 943, 795, 384, 383, 461, 404, 758, 839, 887,
- 715, 67, 618, 276, 204, 918, 873, 777, 604, 560,
- 951, 160, 578, 722, 79, 804, 96, 409, 713, 940,
- 652, 934, 970, 447, 318, 353, 859, 672, 112, 785,
- 645, 863, 803, 350, 139, 93, 354, 99, 820, 908,
- 609, 772, 154, 274, 580, 184, 79, 626, 630, 742,
- 653, 282, 762, 623, 680, 81, 927, 626, 789, 125,
- 411, 521, 938, 300, 821, 78, 343, 175, 128, 250,
- 170, 774, 972, 275, 999, 639, 495, 78, 352, 126,
- 857, 956, 358, 619, 580, 124, 737, 594, 701, 612,
- 669, 112, 134, 694, 363, 992, 809, 743, 168, 974,
- 944, 375, 748, 52, 600, 747, 642, 182, 862, 81,
- 344, 805, 988, 739, 511, 655, 814, 334, 249, 515,
- 897, 955, 664, 981, 649, 113, 974, 459, 893, 228,
- 433, 837, 553, 268, 926, 240, 102, 654, 459, 51,
- 686, 754, 806, 760, 493, 403, 415, 394, 687, 700,
- 946, 670, 656, 610, 738, 392, 760, 799, 887, 653,
- 978, 321, 576, 617, 626, 502, 894, 679, 243, 440,
- 680, 879, 194, 572, 640, 724, 926, 56, 204, 700,
- 707, 151, 457, 449, 797, 195, 791, 558, 945, 679,
- 297, 59, 87, 824, 713, 663, 412, 693, 342, 606,
- 134, 108, 571, 364, 631, 212, 174, 643, 304, 329,
- 343, 97, 430, 751, 497, 314, 983, 374, 822, 928,
- 140, 206, 73, 263, 980, 736, 876, 478, 430, 305,
- 170, 514, 364, 692, 829, 82, 855, 953, 676, 246,
- 369, 970, 294, 750, 807, 827, 150, 790, 288, 923,
- 804, 378, 215, 828, 592, 281, 565, 555, 710, 82,
- 896, 831, 547, 261, 524, 462, 293, 465, 502, 56,
- 661, 821, 976, 991, 658, 869, 905, 758, 745, 193,
- 768, 550, 608, 933, 378, 286, 215, 979, 792, 961,
- 61, 688, 793, 644, 986, 403, 106, 366, 905, 644,
- 372, 567, 466, 434, 645, 210, 389, 550, 919, 135,
- 780, 773, 635, 389, 707, 100, 626, 958, 165, 504,
- 920, 176, 193, 713, 857, 265, 203, 50, 668, 108,
- 645, 990, 626, 197, 510, 357, 358, 850, 858, 364,
- 936, 638
- };
-
- /**
- * Return the random number at a specific index.
- *
- * @param i the index
- * @return the random number
- */
- static int rNums(final int i){
- return RNUMS[i];
- }
-}
\ No newline at end of file
diff --git a/src/org/apache/commons/compress/compressors/bzip2/package.html b/src/org/apache/commons/compress/compressors/bzip2/package.html
deleted file mode 100644
index fe27e6e6668..00000000000
--- a/src/org/apache/commons/compress/compressors/bzip2/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
- Provides stream classes for compressing and decompressing
- streams using the BZip2 algorithm.
-
-
diff --git a/src/org/apache/commons/compress/compressors/deflate/DeflateCompressorInputStream.java b/src/org/apache/commons/compress/compressors/deflate/DeflateCompressorInputStream.java
deleted file mode 100644
index 0e072844ab0..00000000000
--- a/src/org/apache/commons/compress/compressors/deflate/DeflateCompressorInputStream.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.deflate;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.zip.Inflater;
-import java.util.zip.InflaterInputStream;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * Deflate decompressor.
- * @since 1.9
- */
-public class DeflateCompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
-
- private static final int MAGIC_1 = 0x78;
- private static final int MAGIC_2a = 0x01;
- private static final int MAGIC_2b = 0x5e;
- private static final int MAGIC_2c = 0x9c;
- private static final int MAGIC_2d = 0xda;
-
- private final CountingInputStream countingStream;
- private final InputStream in;
- private final Inflater inflater;
-
- /**
- * Creates a new input stream that decompresses Deflate-compressed data
- * from the specified input stream.
- *
- * @param inputStream where to read the compressed data
- *
- */
- public DeflateCompressorInputStream(final InputStream inputStream) {
- this(inputStream, new DeflateParameters());
- }
-
- /**
- * Creates a new input stream that decompresses Deflate-compressed data
- * from the specified input stream.
- *
- * @param inputStream where to read the compressed data
- * @param parameters parameters
- */
- public DeflateCompressorInputStream(final InputStream inputStream,
- final DeflateParameters parameters) {
- inflater = new Inflater(!parameters.withZlibHeader());
- in = new InflaterInputStream(countingStream = new CountingInputStream(inputStream), inflater);
- }
-
- /** {@inheritDoc} */
- @Override
- public int read() throws IOException {
- final int ret = in.read();
- count(ret == -1 ? 0 : 1);
- return ret;
- }
-
- /** {@inheritDoc} */
- @Override
- public int read(final byte[] buf, final int off, final int len) throws IOException {
- final int ret = in.read(buf, off, len);
- count(ret);
- return ret;
- }
-
- /** {@inheritDoc} */
- @Override
- public long skip(final long n) throws IOException {
- return IOUtils.skip(in, n);
- }
-
- /** {@inheritDoc} */
- @Override
- public int available() throws IOException {
- return in.available();
- }
-
- /** {@inheritDoc} */
- @Override
- public void close() throws IOException {
- try {
- in.close();
- } finally {
- inflater.end();
- }
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return countingStream.getBytesRead();
- }
-
- /**
- * Checks if the signature matches what is expected for a zlib / deflated file
- * with the zlib header.
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is zlib / deflate compressed with a header
- * stream, false otherwise
- *
- * @since 1.10
- */
- public static boolean matches(final byte[] signature, final int length) {
- return length > 3 && signature[0] == MAGIC_1 && (
- signature[1] == (byte) MAGIC_2a ||
- signature[1] == (byte) MAGIC_2b ||
- signature[1] == (byte) MAGIC_2c ||
- signature[1] == (byte) MAGIC_2d);
- }
-}
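A usage sketch for the class above, reading raw (headerless) deflate data. The constructor and DeflateParameters.setWithZlibHeader calls appear in the deleted sources; the surrounding harness is hypothetical:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.compress.compressors.deflate.DeflateCompressorInputStream;
import org.apache.commons.compress.compressors.deflate.DeflateParameters;

// Decompresses a raw (headerless) deflate byte array; harness is hypothetical.
final class DeflateReadSketch {
    static byte[] inflateRaw(final byte[] compressed) throws IOException {
        final DeflateParameters params = new DeflateParameters();
        params.setWithZlibHeader(false); // raw deflate, no zlib wrapper expected
        final ByteArrayOutputStream result = new ByteArrayOutputStream();
        try (InputStream in = new DeflateCompressorInputStream(
                new ByteArrayInputStream(compressed), params)) {
            final byte[] buf = new byte[8192];
            int r;
            while ((r = in.read(buf)) != -1) {
                result.write(buf, 0, r);
            }
        }
        return result.toByteArray();
    }
}
```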
diff --git a/src/org/apache/commons/compress/compressors/deflate/DeflateCompressorOutputStream.java b/src/org/apache/commons/compress/compressors/deflate/DeflateCompressorOutputStream.java
deleted file mode 100644
index a315605636d..00000000000
--- a/src/org/apache/commons/compress/compressors/deflate/DeflateCompressorOutputStream.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.deflate;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.zip.Deflater;
-import java.util.zip.DeflaterOutputStream;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-
-/**
- * Deflate compressor.
- * @since 1.9
- */
-public class DeflateCompressorOutputStream extends CompressorOutputStream {
- private final DeflaterOutputStream out;
- private final Deflater deflater;
-
- /**
- * Creates a Deflate compressed output stream with the default parameters.
- * @param outputStream the stream to wrap
- * @throws IOException on error
- */
- public DeflateCompressorOutputStream(final OutputStream outputStream) throws IOException {
- this(outputStream, new DeflateParameters());
- }
-
- /**
- * Creates a Deflate compressed output stream with the specified parameters.
- * @param outputStream the stream to wrap
- * @param parameters the deflate parameters to apply
- * @throws IOException on error
- */
- public DeflateCompressorOutputStream(final OutputStream outputStream,
- final DeflateParameters parameters) throws IOException {
- this.deflater = new Deflater(parameters.getCompressionLevel(), !parameters.withZlibHeader());
- this.out = new DeflaterOutputStream(outputStream, deflater);
- }
-
- @Override
- public void write(final int b) throws IOException {
- out.write(b);
- }
-
- @Override
- public void write(final byte[] buf, final int off, final int len) throws IOException {
- out.write(buf, off, len);
- }
-
- /**
- * Flushes the encoder and calls outputStream.flush().
- * All buffered pending data will then be decompressible from
- * the output stream. Calling this function very often may increase
- * the compressed file size a lot.
- */
- @Override
- public void flush() throws IOException {
- out.flush();
- }
-
- /**
- * Finishes compression without closing the underlying stream.
- *
- * No more data can be written to this stream after finishing.
- * @throws IOException on error
- */
- public void finish() throws IOException {
- out.finish();
- }
-
- @Override
- public void close() throws IOException {
- try {
- out.close();
- } finally {
- deflater.end();
- }
- }
-}
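And the matching write side: a sketch compressing a string into a zlib-wrapped stream at the maximum level via DeflateParameters.setCompressionLevel (harness hypothetical):

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.compress.compressors.deflate.DeflateCompressorOutputStream;
import org.apache.commons.compress.compressors.deflate.DeflateParameters;

// Compresses a string into a zlib-wrapped deflate stream; harness is hypothetical.
final class DeflateWriteSketch {
    public static void main(final String[] args) throws IOException {
        final DeflateParameters params = new DeflateParameters();
        params.setCompressionLevel(9); // 0..9, validated by DeflateParameters above
        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (DeflateCompressorOutputStream out =
                new DeflateCompressorOutputStream(bos, params)) {
            out.write("hello deflate".getBytes(StandardCharsets.UTF_8));
        }
        // bos now starts with the zlib magic checked by matches(): 0x78,
        // typically followed by 0xda at this compression level.
    }
}
```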
diff --git a/src/org/apache/commons/compress/compressors/deflate/DeflateParameters.java b/src/org/apache/commons/compress/compressors/deflate/DeflateParameters.java
deleted file mode 100644
index 7679942a276..00000000000
--- a/src/org/apache/commons/compress/compressors/deflate/DeflateParameters.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.deflate;
-
-import java.util.zip.Deflater;
-
-/**
- * Parameters for the Deflate compressor.
- * @since 1.9
- */
-public class DeflateParameters {
-
- private boolean zlibHeader = true;
- private int compressionLevel = Deflater.DEFAULT_COMPRESSION;
-
- /**
- * Whether or not the zlib header shall be written (when
- * compressing) or expected (when decompressing).
- * @return true if zlib header shall be written
- */
- public boolean withZlibHeader() {
- return zlibHeader;
- }
-
- /**
- * Sets the zlib header presence parameter.
- *
- *
- * <p>This affects whether or not the zlib header will be written
- * (when compressing) or expected (when decompressing).
- *
- * @param zlibHeader true if zlib header shall be written
- */
- public void setWithZlibHeader(final boolean zlibHeader) {
- this.zlibHeader = zlibHeader;
- }
-
- /**
- * The compression level.
- * @see #setCompressionLevel
- * @return the compression level
- */
- public int getCompressionLevel() {
- return compressionLevel;
- }
-
- /**
- * Sets the compression level.
- *
- * @param compressionLevel the compression level (between 0 and 9)
- * @see Deflater#NO_COMPRESSION
- * @see Deflater#BEST_SPEED
- * @see Deflater#DEFAULT_COMPRESSION
- * @see Deflater#BEST_COMPRESSION
- */
- public void setCompressionLevel(final int compressionLevel) {
- if (compressionLevel < -1 || compressionLevel > 9) {
- throw new IllegalArgumentException("Invalid Deflate compression level: " + compressionLevel);
- }
- this.compressionLevel = compressionLevel;
- }
-
-}
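The constructor above maps withZlibHeader() to the inverse of Deflater's nowrap flag, so setWithZlibHeader(false) yields a raw deflate stream. The flag pairing can be illustrated with plain java.util.zip, no commons-compress needed:

    import java.io.ByteArrayOutputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.DataFormatException;
    import java.util.zip.Deflater;
    import java.util.zip.Inflater;

    public class RawDeflateDemo {
        public static void main(String[] args) throws DataFormatException {
            byte[] input = "abcabcabcabc".getBytes(StandardCharsets.US_ASCII);

            Deflater deflater = new Deflater(Deflater.BEST_COMPRESSION, true); // nowrap: no zlib header
            deflater.setInput(input);
            deflater.finish();
            ByteArrayOutputStream compressed = new ByteArrayOutputStream();
            byte[] buf = new byte[64];
            while (!deflater.finished()) {
                compressed.write(buf, 0, deflater.deflate(buf));
            }
            deflater.end();

            Inflater inflater = new Inflater(true); // must also be raw, or inflate() fails
            inflater.setInput(compressed.toByteArray());
            byte[] restored = new byte[input.length];
            int n = inflater.inflate(restored);
            inflater.end();
            System.out.println(n + " bytes restored");
        }
    }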
diff --git a/src/org/apache/commons/compress/compressors/deflate/package.html b/src/org/apache/commons/compress/compressors/deflate/package.html
deleted file mode 100644
index 4ddeb74873d..00000000000
--- a/src/org/apache/commons/compress/compressors/deflate/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
- <p>Provides stream classes that allow (de)compressing streams
- using the DEFLATE algorithm.
-
-
diff --git a/src/org/apache/commons/compress/compressors/deflate64/Deflate64CompressorInputStream.java b/src/org/apache/commons/compress/compressors/deflate64/Deflate64CompressorInputStream.java
deleted file mode 100644
index 883647b141b..00000000000
--- a/src/org/apache/commons/compress/compressors/deflate64/Deflate64CompressorInputStream.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.compressors.deflate64;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-import static org.apache.commons.compress.utils.IOUtils.closeQuietly;
-
-/**
- * Deflate64 decompressor.
- *
- * @since 1.16
- * @NotThreadSafe
- */
-public class Deflate64CompressorInputStream extends CompressorInputStream implements InputStreamStatistics {
- private InputStream originalStream;
- private HuffmanDecoder decoder;
- private long compressedBytesRead;
- private final byte[] oneByte = new byte[1];
-
- /**
- * Constructs a Deflate64CompressorInputStream.
- *
- * @param in the stream to read from
- */
- public Deflate64CompressorInputStream(InputStream in) {
- this(new HuffmanDecoder(in));
- originalStream = in;
- }
-
- Deflate64CompressorInputStream(HuffmanDecoder decoder) {
- this.decoder = decoder;
- }
-
- /**
- * @throws java.io.EOFException if the underlying stream is exhausted before the end of deflated data was reached.
- */
- @Override
- public int read() throws IOException {
- while (true) {
- int r = read(oneByte);
- switch (r) {
- case 1:
- return oneByte[0] & 0xFF;
- case -1:
- return -1;
- case 0:
- continue;
- default:
- throw new IllegalStateException("Invalid return value from read: " + r);
- }
- }
- }
-
- /**
- * @throws java.io.EOFException if the underlying stream is exhausted before the end of deflated data was reached.
- */
- @Override
- public int read(byte[] b, int off, int len) throws IOException {
- int read = -1;
- if (decoder != null) {
- read = decoder.decode(b, off, len);
- compressedBytesRead = decoder.getBytesRead();
- count(read);
- if (read == -1) {
- closeDecoder();
- }
- }
- return read;
- }
-
- @Override
- public int available() throws IOException {
- return decoder != null ? decoder.available() : 0;
- }
-
- @Override
- public void close() throws IOException {
- closeDecoder();
- if (originalStream != null) {
- originalStream.close();
- originalStream = null;
- }
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return compressedBytesRead;
- }
-
- private void closeDecoder() {
- closeQuietly(decoder);
- decoder = null;
- }
-}
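Deflate64 is compression method 9 of the ZIP format; the deleted class decodes one raw Deflate64 stream. A hedged helper sketch against the same API, assuming the Ivy-resolved jar provides it unchanged:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.commons.compress.compressors.deflate64.Deflate64CompressorInputStream;

    public final class Deflate64Demo {
        /** Drains a raw Deflate64 stream into a byte array. */
        static byte[] inflate64(byte[] compressed) throws IOException {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            try (InputStream in = new Deflate64CompressorInputStream(
                    new ByteArrayInputStream(compressed))) {
                byte[] buf = new byte[4096];
                int n;
                while ((n = in.read(buf)) != -1) {
                    out.write(buf, 0, n);
                }
            }
            return out.toByteArray();
        }

        private Deflate64Demo() {
        }
    }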
diff --git a/src/org/apache/commons/compress/compressors/deflate64/HuffmanDecoder.java b/src/org/apache/commons/compress/compressors/deflate64/HuffmanDecoder.java
deleted file mode 100644
index a6afa2cbcb8..00000000000
--- a/src/org/apache/commons/compress/compressors/deflate64/HuffmanDecoder.java
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.compressors.deflate64;
-
-import org.apache.commons.compress.utils.BitInputStream;
-
-import java.io.Closeable;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteOrder;
-import java.util.Arrays;
-
-import static org.apache.commons.compress.compressors.deflate64.HuffmanState.*;
-
-class HuffmanDecoder implements Closeable {
-
- /**
- *
- * value = (base of distance) << 4 | (number of extra bits to read)
- */
- private static final int[] DISTANCE_TABLE = {
- 16, 32, 48, 64, 81, 113, 146, 210, 275, 403, // 0-9
- 532, 788, 1045, 1557, 2070, 3094, 4119, 6167, 8216, 12312, // 10-19
- 16409, 24601, 32794, 49178, 65563, 98331, 131100, 196636, 262173, 393245, // 20-29
- 524318, 786462 // 30-31
- };
-
- /**
- * When using dynamic huffman codes the order in which the values are stored
- * follows the positioning below
- */
- private static final int[] CODE_LENGTHS_ORDER =
- {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
-
- /**
- * Huffman Fixed Literal / Distance tables for mode 1
- */
- private static final int[] FIXED_LITERALS;
- private static final int[] FIXED_DISTANCE;
-
- static {
- FIXED_LITERALS = new int[288];
- Arrays.fill(FIXED_LITERALS, 0, 144, 8);
- Arrays.fill(FIXED_LITERALS, 144, 256, 9);
- Arrays.fill(FIXED_LITERALS, 256, 280, 7);
- Arrays.fill(FIXED_LITERALS, 280, 288, 8);
-
- FIXED_DISTANCE = new int[32];
- Arrays.fill(FIXED_DISTANCE, 5);
- }
-
- private boolean finalBlock = false;
- private DecoderState state;
- private BitInputStream reader;
- private final InputStream in;
-
- private final DecodingMemory memory = new DecodingMemory();
-
- HuffmanDecoder(InputStream in) {
- this.reader = new BitInputStream(in, ByteOrder.LITTLE_ENDIAN);
- this.in = in;
- state = new InitialState();
- }
-
- @Override
- public void close() {
- state = new InitialState();
- reader = null;
- }
-
- public int decode(byte[] b) throws IOException {
- return decode(b, 0, b.length);
- }
-
- public int decode(byte[] b, int off, int len) throws IOException {
- while (!finalBlock || state.hasData()) {
- if (state.state() == INITIAL) {
- finalBlock = readBits(1) == 1;
- int mode = (int) readBits(2);
- switch (mode) {
- case 0:
- switchToUncompressedState();
- break;
- case 1:
- state = new HuffmanCodes(FIXED_CODES, FIXED_LITERALS, FIXED_DISTANCE);
- break;
- case 2:
- int[][] tables = readDynamicTables();
- state = new HuffmanCodes(DYNAMIC_CODES, tables[0], tables[1]);
- break;
- default:
- throw new IllegalStateException("Unsupported compression: " + mode);
- }
- } else {
- return state.read(b, off, len);
- }
- }
- return -1;
- }
-
- /**
- * @since 1.17
- */
- long getBytesRead() {
- return reader.getBytesRead();
- }
-
- private void switchToUncompressedState() throws IOException {
- reader.alignWithByteBoundary();
- long bLen = readBits(16);
- long bNLen = readBits(16);
- if (((bLen ^ 0xFFFF) & 0xFFFF) != bNLen) {
- //noinspection DuplicateStringLiteralInspection
- throw new IllegalStateException("Illegal LEN / NLEN values");
- }
- state = new UncompressedState(bLen);
- }
-
- private int[][] readDynamicTables() throws IOException {
- int[][] result = new int[2][];
- int literals = (int) (readBits(5) + 257);
- result[0] = new int[literals];
-
- int distances = (int) (readBits(5) + 1);
- result[1] = new int[distances];
-
- populateDynamicTables(reader, result[0], result[1]);
- return result;
- }
-
- int available() throws IOException {
- return state.available();
- }
-
- private abstract static class DecoderState {
- abstract HuffmanState state();
-
- abstract int read(byte[] b, int off, int len) throws IOException;
-
- abstract boolean hasData();
-
- abstract int available() throws IOException ;
- }
-
- private class UncompressedState extends DecoderState {
- private final long blockLength;
- private long read;
-
- private UncompressedState(long blockLength) {
- this.blockLength = blockLength;
- }
-
- @Override
- HuffmanState state() {
- return read < blockLength ? STORED : INITIAL;
- }
-
- @Override
- int read(byte[] b, int off, int len) throws IOException {
- // as len is an int and (blockLength - read) is >= 0 the min must fit into an int as well
- int max = (int) Math.min(blockLength - read, len);
- int readSoFar = 0;
- while (readSoFar < max) {
- int readNow;
- if (reader.bitsCached() > 0) {
- byte next = (byte) readBits(Byte.SIZE);
- b[off + readSoFar] = memory.add(next);
- readNow = 1;
- } else {
- readNow = in.read(b, off + readSoFar, max - readSoFar);
- if (readNow == -1) {
- throw new EOFException("Truncated Deflate64 Stream");
- }
- memory.add(b, off + readSoFar, readNow);
- }
- read += readNow;
- readSoFar += readNow;
- }
- return max;
- }
-
- @Override
- boolean hasData() {
- return read < blockLength;
- }
-
- @Override
- int available() throws IOException {
- return (int) Math.min(blockLength - read, reader.bitsAvailable() / Byte.SIZE);
- }
- }
-
- private class InitialState extends DecoderState {
- @Override
- HuffmanState state() {
- return INITIAL;
- }
-
- @Override
- int read(byte[] b, int off, int len) throws IOException {
- throw new IllegalStateException("Cannot read in this state");
- }
-
- @Override
- boolean hasData() {
- return false;
- }
-
- @Override
- int available() {
- return 0;
- }
- }
-
- private class HuffmanCodes extends DecoderState {
- private boolean endOfBlock = false;
- private final HuffmanState state;
- private final BinaryTreeNode lengthTree;
- private final BinaryTreeNode distanceTree;
-
- private int runBufferPos = 0;
- private byte[] runBuffer = new byte[0];
- private int runBufferLength = 0;
-
- HuffmanCodes(HuffmanState state, int[] lengths, int[] distance) {
- this.state = state;
- lengthTree = buildTree(lengths);
- distanceTree = buildTree(distance);
- }
-
- @Override
- HuffmanState state() {
- return endOfBlock ? INITIAL : state;
- }
-
- @Override
- int read(byte[] b, int off, int len) throws IOException {
- return decodeNext(b, off, len);
- }
-
- private int decodeNext(byte[] b, int off, int len) throws IOException {
- if (endOfBlock) {
- return -1;
- }
- int result = copyFromRunBuffer(b, off, len);
-
- while (result < len) {
- int symbol = nextSymbol(reader, lengthTree);
- if (symbol < 256) {
- b[off + result++] = memory.add((byte) symbol);
- } else if (symbol > 256) {
- int runMask = RUN_LENGTH_TABLE[symbol - 257];
- int run = runMask >>> 5;
- int runXtra = runMask & 0x1F;
- run += readBits(runXtra);
-
- int distSym = nextSymbol(reader, distanceTree);
-
- int distMask = DISTANCE_TABLE[distSym];
- int dist = distMask >>> 4;
- int distXtra = distMask & 0xF;
- dist += readBits(distXtra);
-
- if (runBuffer.length < run) {
- runBuffer = new byte[run];
- }
- runBufferLength = run;
- runBufferPos = 0;
- memory.recordToBuffer(dist, run, runBuffer);
-
- result += copyFromRunBuffer(b, off + result, len - result);
- } else {
- endOfBlock = true;
- return result;
- }
- }
-
- return result;
- }
-
- private int copyFromRunBuffer(byte[] b, int off, int len) {
- int bytesInBuffer = runBufferLength - runBufferPos;
- int copiedBytes = 0;
- if (bytesInBuffer > 0) {
- copiedBytes = Math.min(len, bytesInBuffer);
- System.arraycopy(runBuffer, runBufferPos, b, off, copiedBytes);
- runBufferPos += copiedBytes;
- }
- return copiedBytes;
- }
-
- @Override
- boolean hasData() {
- return !endOfBlock;
- }
-
- @Override
- int available() {
- return runBufferLength - runBufferPos;
- }
- }
-
- private static int nextSymbol(BitInputStream reader, BinaryTreeNode tree) throws IOException {
- BinaryTreeNode node = tree;
- while (node != null && node.literal == -1) {
- long bit = readBits(reader, 1);
- node = bit == 0 ? node.leftNode : node.rightNode;
- }
- return node != null ? node.literal : -1;
- }
-
- private static void populateDynamicTables(BitInputStream reader, int[] literals, int[] distances) throws IOException {
- int codeLengths = (int) (readBits(reader, 4) + 4);
-
- int[] codeLengthValues = new int[19];
- for (int cLen = 0; cLen < codeLengths; cLen++) {
- codeLengthValues[CODE_LENGTHS_ORDER[cLen]] = (int) readBits(reader, 3);
- }
-
- BinaryTreeNode codeLengthTree = buildTree(codeLengthValues);
-
- final int[] auxBuffer = new int[literals.length + distances.length];
-
- int value = -1;
- int length = 0;
- int off = 0;
- while (off < auxBuffer.length) {
- if (length > 0) {
- auxBuffer[off++] = value;
- length--;
- } else {
- int symbol = nextSymbol(reader, codeLengthTree);
- if (symbol < 16) {
- value = symbol;
- auxBuffer[off++] = value;
- } else if (symbol == 16) {
- length = (int) (readBits(reader, 2) + 3);
- } else if (symbol == 17) {
- value = 0;
- length = (int) (readBits(reader, 3) + 3);
- } else if (symbol == 18) {
- value = 0;
- length = (int) (readBits(reader, 7) + 11);
- }
- }
- }
-
- System.arraycopy(auxBuffer, 0, literals, 0, literals.length);
- System.arraycopy(auxBuffer, literals.length, distances, 0, distances.length);
- }
-
- private static class BinaryTreeNode {
- private final int bits;
- int literal = -1;
- BinaryTreeNode leftNode;
- BinaryTreeNode rightNode;
-
- private BinaryTreeNode(int bits) {
- this.bits = bits;
- }
-
- void leaf(int symbol) {
- literal = symbol;
- leftNode = null;
- rightNode = null;
- }
-
- BinaryTreeNode left() {
- if (leftNode == null && literal == -1) {
- leftNode = new BinaryTreeNode(bits + 1);
- }
- return leftNode;
- }
-
- BinaryTreeNode right() {
- if (rightNode == null && literal == -1) {
- rightNode = new BinaryTreeNode(bits + 1);
- }
- return rightNode;
- }
- }
-
- private static BinaryTreeNode buildTree(int[] litTable) {
- int[] literalCodes = getCodes(litTable);
-
- BinaryTreeNode root = new BinaryTreeNode(0);
-
- for (int i = 0; i < litTable.length; i++) {
- int len = litTable[i];
- if (len != 0) {
- BinaryTreeNode node = root;
- int lit = literalCodes[len - 1];
- for (int p = len - 1; p >= 0; p--) {
- int bit = lit & (1 << p);
- node = bit == 0 ? node.left() : node.right();
- }
- node.leaf(i);
- literalCodes[len - 1]++;
- }
- }
- return root;
- }
-
- private static int[] getCodes(int[] litTable) {
- int max = 0;
- int[] blCount = new int[65];
-
- for (int aLitTable : litTable) {
- max = Math.max(max, aLitTable);
- blCount[aLitTable]++;
- }
- blCount = Arrays.copyOf(blCount, max + 1);
-
- int code = 0;
- int[] nextCode = new int[max + 1];
- for (int i = 0; i <= max; i++) {
- code = (code + blCount[i]) << 1;
- nextCode[i] = code;
- }
-
- return nextCode;
- }
-
- private static class DecodingMemory {
- private final byte[] memory;
- private final int mask;
- private int wHead;
- private boolean wrappedAround;
-
- private DecodingMemory() {
- this(16);
- }
-
- private DecodingMemory(int bits) {
- memory = new byte[1 << bits];
- mask = memory.length - 1;
- }
-
- byte add(byte b) {
- memory[wHead] = b;
- wHead = incCounter(wHead);
- return b;
- }
-
- void add(byte[] b, int off, int len) {
- for (int i = off; i < off + len; i++) {
- add(b[i]);
- }
- }
-
- void recordToBuffer(int distance, int length, byte[] buff) {
- if (distance > memory.length) {
- throw new IllegalStateException("Illegal distance parameter: " + distance);
- }
- int start = (wHead - distance) & mask;
- if (!wrappedAround && start >= wHead) {
- throw new IllegalStateException("Attempt to read beyond memory: dist=" + distance);
- }
- for (int i = 0, pos = start; i < length; i++, pos = incCounter(pos)) {
- buff[i] = add(memory[pos]);
- }
- }
-
- private int incCounter(int counter) {
- final int newCounter = (counter + 1) & mask;
- if (!wrappedAround && newCounter < counter) {
- wrappedAround = true;
- }
- return newCounter;
- }
- }
-
- private long readBits(int numBits) throws IOException {
- return readBits(reader, numBits);
- }
-
- private static long readBits(BitInputStream reader, int numBits) throws IOException {
- long r = reader.readBits(numBits);
- if (r == -1) {
- throw new EOFException("Truncated Deflate64 Stream");
- }
- return r;
- }
-}
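The packed tables above combine a base value and an extra-bit count in a single int, exactly as unpacked in decodeNext(): run lengths as (base << 5) | extra, distances as (base << 4) | extra. A worked unpacking of DISTANCE_TABLE[4] = 81:

    public class PackedTableDemo {
        public static void main(String[] args) {
            int packed = 81;                 // DISTANCE_TABLE[4] above
            int baseDistance = packed >>> 4; // 5
            int extraBits = packed & 0xF;    // 1 extra bit to read from the stream
            // With 1 extra bit, distance code 4 covers distances 5..6,
            // matching the DEFLATE distance table.
            System.out.println("base=" + baseDistance + " extra=" + extraBits);
        }
    }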
diff --git a/src/org/apache/commons/compress/compressors/deflate64/HuffmanState.java b/src/org/apache/commons/compress/compressors/deflate64/HuffmanState.java
deleted file mode 100644
index b34bb7ed92f..00000000000
--- a/src/org/apache/commons/compress/compressors/deflate64/HuffmanState.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.compressors.deflate64;
-
-enum HuffmanState {
- INITIAL,
- STORED,
- DYNAMIC_CODES,
- FIXED_CODES
-}
diff --git a/src/org/apache/commons/compress/compressors/deflate64/package.html b/src/org/apache/commons/compress/compressors/deflate64/package.html
deleted file mode 100644
index 4a0cdd0ae9e..00000000000
--- a/src/org/apache/commons/compress/compressors/deflate64/package.html
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-
-
- <p>Provides a stream that allows decompressing streams using the
- DEFLATE64(tm) algorithm. DEFLATE64 is a trademark of PKWARE,
- Inc.
-
-
diff --git a/src/org/apache/commons/compress/compressors/gzip/GzipCompressorInputStream.java b/src/org/apache/commons/compress/compressors/gzip/GzipCompressorInputStream.java
deleted file mode 100644
index 9e05f8bcbdb..00000000000
--- a/src/org/apache/commons/compress/compressors/gzip/GzipCompressorInputStream.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.gzip;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.EOFException;
-import java.io.InputStream;
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.BufferedInputStream;
-import java.util.zip.DataFormatException;
-import java.util.zip.Deflater;
-import java.util.zip.Inflater;
-import java.util.zip.CRC32;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.ByteUtils;
-import org.apache.commons.compress.utils.CharsetNames;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * Input stream that decompresses .gz files.
- *
- *
- * <p>This supports decompressing concatenated .gz files which is important
- * when decompressing standalone .gz files.
- *
- *
- * {@link java.util.zip.GZIPInputStream} doesn't decompress concatenated .gz
- * files: it stops after the first member and silently ignores the rest.
- * It doesn't leave the read position to point to the beginning of the next
- * member, which makes it difficult to work around the lack of concatenation
- * support.
- *
- *
- *
- * Instead of using GZIPInputStream, this class has its own .gz
- * container format decoder. The actual decompression is done with
- * {@link java.util.zip.Inflater}.
- *
- *
- *
- * <p>If you use the constructor {@code GzipCompressorInputStream(in)}
- * or {@code GzipCompressorInputStream(in, false)} with some {@code
- * InputStream} {@code in} then {@link #read} will return -1 as soon
- * as the first internal member has been read completely. The stream
- * {@code in} will be positioned at the start of the second gzip
- * member if there is one.
- *
- *
- * <p>If you use the constructor {@code GzipCompressorInputStream(in,
- * true)} with some {@code InputStream} {@code in} then {@link #read}
- * will return -1 once the stream {@code in} has been exhausted. The
- * data read from a stream constructed this way will consist of the
- * concatenated data of all gzip members contained inside {@code
- * in}.
- *
- * @see "https://tools.ietf.org/html/rfc1952"
- */
-public class GzipCompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
-
- // Header flags
- // private static final int FTEXT = 0x01; // Uninteresting for us
- private static final int FHCRC = 0x02;
- private static final int FEXTRA = 0x04;
- private static final int FNAME = 0x08;
- private static final int FCOMMENT = 0x10;
- private static final int FRESERVED = 0xE0;
-
- private final CountingInputStream countingStream;
-
- // Compressed input stream, possibly wrapped in a
- // BufferedInputStream, always wrapped in countingStream above
- private final InputStream in;
-
- // True if decompressing multi member streams.
- private final boolean decompressConcatenated;
-
- // Buffer to hold the input data
- private final byte[] buf = new byte[8192];
-
- // Amount of data in buf.
- private int bufUsed;
-
- // Decompressor
- private Inflater inf = new Inflater(true);
-
- // CRC32 from uncompressed data
- private final CRC32 crc = new CRC32();
-
- // True once everything has been decompressed
- private boolean endReached = false;
-
- // used in no-arg read method
- private final byte[] oneByte = new byte[1];
-
- private final GzipParameters parameters = new GzipParameters();
-
- /**
- * Constructs a new input stream that decompresses gzip-compressed data
- * from the specified input stream.
- *
- * This is equivalent to
- * GzipCompressorInputStream(inputStream, false) and thus
- * will not decompress concatenated .gz files.
- *
- * @param inputStream the InputStream from which this object should
- * be created
- *
- * @throws IOException if the stream could not be created
- */
- public GzipCompressorInputStream(final InputStream inputStream)
- throws IOException {
- this(inputStream, false);
- }
-
- /**
- * Constructs a new input stream that decompresses gzip-compressed data
- * from the specified input stream.
- *
- * If decompressConcatenated is {@code false}:
- * This decompressor might read more input than it will actually use.
- * If inputStream supports mark and
- * reset, then the input position will be adjusted
- * so that it is right after the last byte of the compressed stream.
- * If mark isn't supported, the input position will be
- * undefined.
- *
- * @param inputStream the InputStream from which this object should
- * be created
- * @param decompressConcatenated
- * if true, decompress until the end of the input;
- * if false, stop after the first .gz member
- *
- * @throws IOException if the stream could not be created
- */
- public GzipCompressorInputStream(final InputStream inputStream,
- final boolean decompressConcatenated)
- throws IOException {
- countingStream = new CountingInputStream(inputStream);
- // Mark support is strictly needed for concatenated files only,
- // but it's simpler if it is always available.
- if (countingStream.markSupported()) {
- in = countingStream;
- } else {
- in = new BufferedInputStream(countingStream);
- }
-
- this.decompressConcatenated = decompressConcatenated;
- init(true);
- }
-
- /**
- * Provides the stream's meta data - may change with each stream
- * when decompressing concatenated streams.
- * @return the stream's meta data
- * @since 1.8
- */
- public GzipParameters getMetaData() {
- return parameters;
- }
-
- private boolean init(final boolean isFirstMember) throws IOException {
- assert isFirstMember || decompressConcatenated;
-
- // Check the magic bytes without a possibility of EOFException.
- final int magic0 = in.read();
- final int magic1 = in.read();
-
- // If end of input was reached after decompressing at least
- // one .gz member, we have reached the end of the file successfully.
- if (magic0 == -1 && !isFirstMember) {
- return false;
- }
-
- if (magic0 != 31 || magic1 != 139) {
- throw new IOException(isFirstMember
- ? "Input is not in the .gz format"
- : "Garbage after a valid .gz stream");
- }
-
- // Parsing the rest of the header may throw EOFException.
- final DataInput inData = new DataInputStream(in);
- final int method = inData.readUnsignedByte();
- if (method != Deflater.DEFLATED) {
- throw new IOException("Unsupported compression method "
- + method + " in the .gz header");
- }
-
- final int flg = inData.readUnsignedByte();
- if ((flg & FRESERVED) != 0) {
- throw new IOException(
- "Reserved flags are set in the .gz header");
- }
-
- parameters.setModificationTime(ByteUtils.fromLittleEndian(inData, 4) * 1000);
- switch (inData.readUnsignedByte()) { // extra flags
- case 2:
- parameters.setCompressionLevel(Deflater.BEST_COMPRESSION);
- break;
- case 4:
- parameters.setCompressionLevel(Deflater.BEST_SPEED);
- break;
- default:
- // ignored for now
- break;
- }
- parameters.setOperatingSystem(inData.readUnsignedByte());
-
- // Extra field, ignored
- if ((flg & FEXTRA) != 0) {
- int xlen = inData.readUnsignedByte();
- xlen |= inData.readUnsignedByte() << 8;
-
- // This isn't as efficient as calling in.skip would be,
- // but it's lazier to handle unexpected end of input this way.
- // Most files don't have an extra field anyway.
- while (xlen-- > 0) {
- inData.readUnsignedByte();
- }
- }
-
- // Original file name
- if ((flg & FNAME) != 0) {
- parameters.setFilename(new String(readToNull(inData),
- CharsetNames.ISO_8859_1));
- }
-
- // Comment
- if ((flg & FCOMMENT) != 0) {
- parameters.setComment(new String(readToNull(inData),
- CharsetNames.ISO_8859_1));
- }
-
- // Header "CRC16" which is actually a truncated CRC32 (which isn't
- // as good as real CRC16). I don't know if any encoder implementation
- // sets this, so it's not worth trying to verify it. GNU gzip 1.4
- // doesn't support this field, but zlib seems to be able to at least
- // skip over it.
- if ((flg & FHCRC) != 0) {
- inData.readShort();
- }
-
- // Reset
- inf.reset();
- crc.reset();
-
- return true;
- }
-
- private static byte[] readToNull(final DataInput inData) throws IOException {
- final ByteArrayOutputStream bos = new ByteArrayOutputStream();
- int b = 0;
- while ((b = inData.readUnsignedByte()) != 0x00) { // NOPMD
- bos.write(b);
- }
- return bos.toByteArray();
- }
-
- @Override
- public int read() throws IOException {
- return read(oneByte, 0, 1) == -1 ? -1 : oneByte[0] & 0xFF;
- }
-
- /**
- * {@inheritDoc}
- *
- * @since 1.1
- */
- @Override
- public int read(final byte[] b, int off, int len) throws IOException {
- if (endReached) {
- return -1;
- }
-
- int size = 0;
-
- while (len > 0) {
- if (inf.needsInput()) {
- // Remember the current position because we may need to
- // rewind after reading too much input.
- in.mark(buf.length);
-
- bufUsed = in.read(buf);
- if (bufUsed == -1) {
- throw new EOFException();
- }
-
- inf.setInput(buf, 0, bufUsed);
- }
-
- int ret;
- try {
- ret = inf.inflate(b, off, len);
- } catch (final DataFormatException e) {
- throw new IOException("Gzip-compressed data is corrupt");
- }
-
- crc.update(b, off, ret);
- off += ret;
- len -= ret;
- size += ret;
- count(ret);
-
- if (inf.finished()) {
- // We may have read too many bytes. Rewind the read
- // position to match the actual amount used.
- //
- // NOTE: The "if" is there just in case. Since we used
- // in.mark earlier, it should always skip enough.
- in.reset();
-
- final int skipAmount = bufUsed - inf.getRemaining();
- if (IOUtils.skip(in, skipAmount) != skipAmount) {
- throw new IOException();
- }
-
- bufUsed = 0;
-
- final DataInput inData = new DataInputStream(in);
-
- // CRC32
- final long crcStored = ByteUtils.fromLittleEndian(inData, 4);
-
- if (crcStored != crc.getValue()) {
- throw new IOException("Gzip-compressed data is corrupt "
- + "(CRC32 error)");
- }
-
- // Uncompressed size modulo 2^32 (ISIZE in the spec)
- final long isize = ByteUtils.fromLittleEndian(inData, 4);
-
- if (isize != (inf.getBytesWritten() & 0xffffffffL)) {
- throw new IOException("Gzip-compressed data is corrupt"
- + "(uncompressed size mismatch)");
- }
-
- // See if this is the end of the file.
- if (!decompressConcatenated || !init(false)) {
- inf.end();
- inf = null;
- endReached = true;
- return size == 0 ? -1 : size;
- }
- }
- }
-
- return size;
- }
-
- /**
- * Checks if the signature matches what is expected for a .gz file.
- *
- * @param signature the bytes to check
- * @param length the number of bytes to check
- * @return true if this is a .gz stream, false otherwise
- *
- * @since 1.1
- */
- public static boolean matches(final byte[] signature, final int length) {
- return length >= 2 && signature[0] == 31 && signature[1] == -117;
- }
-
- /**
- * Closes the input stream (unless it is System.in).
- *
- * @since 1.2
- */
- @Override
- public void close() throws IOException {
- if (inf != null) {
- inf.end();
- inf = null;
- }
-
- if (this.in != System.in) {
- this.in.close();
- }
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return countingStream.getBytesRead();
- }
-}
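The decompressConcatenated flag documented above is easiest to see with two members written back to back (what "cat a.gz b.gz" produces); a sketch assuming the commons-compress gzip classes are on the classpath:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
    import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;

    public class ConcatenatedGzipDemo {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            for (String member : new String[] {"first ", "second"}) {
                try (GzipCompressorOutputStream gz = new GzipCompressorOutputStream(bytes)) {
                    gz.write(member.getBytes("US-ASCII"));
                } // closing each member is harmless: ByteArrayOutputStream.close() is a no-op
            }

            // true: keep decoding members until the underlying stream is exhausted
            try (GzipCompressorInputStream in = new GzipCompressorInputStream(
                    new ByteArrayInputStream(bytes.toByteArray()), true)) {
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                int b;
                while ((b = in.read()) != -1) {
                    out.write(b);
                }
                System.out.println(out.toString("US-ASCII")); // first second
            }
        }
    }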
diff --git a/src/org/apache/commons/compress/compressors/gzip/GzipCompressorOutputStream.java b/src/org/apache/commons/compress/compressors/gzip/GzipCompressorOutputStream.java
deleted file mode 100644
index d3f40124df1..00000000000
--- a/src/org/apache/commons/compress/compressors/gzip/GzipCompressorOutputStream.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.gzip;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.util.zip.CRC32;
-import java.util.zip.Deflater;
-import java.util.zip.GZIPInputStream;
-import java.util.zip.GZIPOutputStream;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-import org.apache.commons.compress.utils.CharsetNames;
-
-/**
- * Compressed output stream using the gzip format. This implementation improves
- * over the standard {@link GZIPOutputStream} class by allowing
- * the configuration of the compression level and the header metadata (filename,
- * comment, modification time, operating system and extra flags).
- *
- * @see <a href="https://tools.ietf.org/html/rfc1952">GZIP File Format Specification</a>
- */
-public class GzipCompressorOutputStream extends CompressorOutputStream {
-
- /** Header flag indicating a file name follows the header */
- private static final int FNAME = 1 << 3;
-
- /** Header flag indicating a comment follows the header */
- private static final int FCOMMENT = 1 << 4;
-
- /** The underlying stream */
- private final OutputStream out;
-
- /** Deflater used to compress the data */
- private final Deflater deflater;
-
- /** The buffer receiving the compressed data from the deflater */
- private final byte[] deflateBuffer = new byte[512];
-
- /** Indicates if the stream has been closed */
- private boolean closed;
-
- /** The checksum of the uncompressed data */
- private final CRC32 crc = new CRC32();
-
- /**
- * Creates a gzip compressed output stream with the default parameters.
- * @param out the stream to compress to
- * @throws IOException if writing fails
- */
- public GzipCompressorOutputStream(final OutputStream out) throws IOException {
- this(out, new GzipParameters());
- }
-
- /**
- * Creates a gzip compressed output stream with the specified parameters.
- * @param out the stream to compress to
- * @param parameters the parameters to use
- * @throws IOException if writing fails
- *
- * @since 1.7
- */
- public GzipCompressorOutputStream(final OutputStream out, final GzipParameters parameters) throws IOException {
- this.out = out;
- this.deflater = new Deflater(parameters.getCompressionLevel(), true);
-
- writeHeader(parameters);
- }
-
- private void writeHeader(final GzipParameters parameters) throws IOException {
- final String filename = parameters.getFilename();
- final String comment = parameters.getComment();
-
- final ByteBuffer buffer = ByteBuffer.allocate(10);
- buffer.order(ByteOrder.LITTLE_ENDIAN);
- buffer.putShort((short) GZIPInputStream.GZIP_MAGIC);
- buffer.put((byte) Deflater.DEFLATED); // compression method (8: deflate)
- buffer.put((byte) ((filename != null ? FNAME : 0) | (comment != null ? FCOMMENT : 0))); // flags
- buffer.putInt((int) (parameters.getModificationTime() / 1000));
-
- // extra flags
- final int compressionLevel = parameters.getCompressionLevel();
- if (compressionLevel == Deflater.BEST_COMPRESSION) {
- buffer.put((byte) 2);
- } else if (compressionLevel == Deflater.BEST_SPEED) {
- buffer.put((byte) 4);
- } else {
- buffer.put((byte) 0);
- }
-
- buffer.put((byte) parameters.getOperatingSystem());
-
- out.write(buffer.array());
-
- if (filename != null) {
- out.write(filename.getBytes(CharsetNames.ISO_8859_1));
- out.write(0);
- }
-
- if (comment != null) {
- out.write(comment.getBytes(CharsetNames.ISO_8859_1));
- out.write(0);
- }
- }
-
- private void writeTrailer() throws IOException {
- final ByteBuffer buffer = ByteBuffer.allocate(8);
- buffer.order(ByteOrder.LITTLE_ENDIAN);
- buffer.putInt((int) crc.getValue());
- buffer.putInt(deflater.getTotalIn());
-
- out.write(buffer.array());
- }
-
- @Override
- public void write(final int b) throws IOException {
- write(new byte[]{(byte) (b & 0xff)}, 0, 1);
- }
-
- /**
- * {@inheritDoc}
- *
- * @since 1.1
- */
- @Override
- public void write(final byte[] buffer) throws IOException {
- write(buffer, 0, buffer.length);
- }
-
- /**
- * {@inheritDoc}
- *
- * @since 1.1
- */
- @Override
- public void write(final byte[] buffer, final int offset, final int length) throws IOException {
- if (deflater.finished()) {
- throw new IOException("Cannot write more data, the end of the compressed data stream has been reached");
-
- } else if (length > 0) {
- deflater.setInput(buffer, offset, length);
-
- while (!deflater.needsInput()) {
- deflate();
- }
-
- crc.update(buffer, offset, length);
- }
- }
-
- private void deflate() throws IOException {
- final int length = deflater.deflate(deflateBuffer, 0, deflateBuffer.length);
- if (length > 0) {
- out.write(deflateBuffer, 0, length);
- }
- }
-
- /**
- * Finishes writing compressed data to the underlying stream without closing it.
- *
- * @since 1.7
- * @throws IOException on error
- */
- public void finish() throws IOException {
- if (!deflater.finished()) {
- deflater.finish();
-
- while (!deflater.finished()) {
- deflate();
- }
-
- writeTrailer();
- }
- }
-
- /**
- * {@inheritDoc}
- *
- * @since 1.7
- */
- @Override
- public void flush() throws IOException {
- out.flush();
- }
-
- @Override
- public void close() throws IOException {
- if (!closed) {
- finish();
- deflater.end();
- out.close();
- closed = true;
- }
- }
-
-}
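Unlike java.util.zip.GZIPOutputStream, the deleted class exposes the header fields through GzipParameters (removed in the next hunk); a sketch with an illustrative file name:

    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.util.zip.Deflater;

    import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
    import org.apache.commons.compress.compressors.gzip.GzipParameters;

    public class GzipMetadataDemo {
        public static void main(String[] args) throws IOException {
            GzipParameters params = new GzipParameters();
            params.setCompressionLevel(Deflater.BEST_COMPRESSION); // header XFL byte becomes 2
            params.setFilename("notes.txt");                       // FNAME field, ISO-8859-1
            params.setComment("demo member");                      // FCOMMENT field
            params.setModificationTime(System.currentTimeMillis()); // written as seconds (MTIME)

            try (GzipCompressorOutputStream gz = new GzipCompressorOutputStream(
                    new FileOutputStream("notes.txt.gz"), params)) {
                gz.write("some text".getBytes("ISO-8859-1"));
            }
        }
    }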
diff --git a/src/org/apache/commons/compress/compressors/gzip/GzipParameters.java b/src/org/apache/commons/compress/compressors/gzip/GzipParameters.java
deleted file mode 100644
index 3887a686eab..00000000000
--- a/src/org/apache/commons/compress/compressors/gzip/GzipParameters.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.gzip;
-
-import java.util.zip.Deflater;
-
-/**
- * Parameters for the GZIP compressor.
- *
- * @since 1.7
- */
-public class GzipParameters {
-
- private int compressionLevel = Deflater.DEFAULT_COMPRESSION;
- private long modificationTime;
- private String filename;
- private String comment;
- private int operatingSystem = 255; // Unknown OS by default
-
- public int getCompressionLevel() {
- return compressionLevel;
- }
-
- /**
- * Sets the compression level.
- *
- * @param compressionLevel the compression level (between 0 and 9)
- * @see Deflater#NO_COMPRESSION
- * @see Deflater#BEST_SPEED
- * @see Deflater#DEFAULT_COMPRESSION
- * @see Deflater#BEST_COMPRESSION
- */
- public void setCompressionLevel(final int compressionLevel) {
- if (compressionLevel < -1 || compressionLevel > 9) {
- throw new IllegalArgumentException("Invalid gzip compression level: " + compressionLevel);
- }
- this.compressionLevel = compressionLevel;
- }
-
- public long getModificationTime() {
- return modificationTime;
- }
-
- /**
- * Sets the modification time of the compressed file.
- *
- * @param modificationTime the modification time, in milliseconds
- */
- public void setModificationTime(final long modificationTime) {
- this.modificationTime = modificationTime;
- }
-
- public String getFilename() {
- return filename;
- }
-
- /**
- * Sets the name of the compressed file.
- *
- * @param filename the name of the file without the directory path
- */
- public void setFilename(final String filename) {
- this.filename = filename;
- }
-
- public String getComment() {
- return comment;
- }
-
- public void setComment(final String comment) {
- this.comment = comment;
- }
-
- public int getOperatingSystem() {
- return operatingSystem;
- }
-
- /**
- * Sets the operating system on which the compression took place.
- * The defined values are:
- * <ul>
- *   <li>0: FAT filesystem (MS-DOS, OS/2, NT/Win32)</li>
- *   <li>1: Amiga</li>
- *   <li>2: VMS (or OpenVMS)</li>
- *   <li>3: Unix</li>
- *   <li>4: VM/CMS</li>
- *   <li>5: Atari TOS</li>
- *   <li>6: HPFS filesystem (OS/2, NT)</li>
- *   <li>7: Macintosh</li>
- *   <li>8: Z-System</li>
- *   <li>9: CP/M</li>
- *   <li>10: TOPS-20</li>
- *   <li>11: NTFS filesystem (NT)</li>
- *   <li>12: QDOS</li>
- *   <li>13: Acorn RISCOS</li>
- *   <li>255: Unknown</li>
- * </ul>
- *
- * @param operatingSystem the code of the operating system
- */
- public void setOperatingSystem(final int operatingSystem) {
- this.operatingSystem = operatingSystem;
- }
-}
diff --git a/src/org/apache/commons/compress/compressors/gzip/GzipUtils.java b/src/org/apache/commons/compress/compressors/gzip/GzipUtils.java
deleted file mode 100644
index 0edf655717a..00000000000
--- a/src/org/apache/commons/compress/compressors/gzip/GzipUtils.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.gzip;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-import org.apache.commons.compress.compressors.FileNameUtil;
-
-/**
- * Utility code for the gzip compression format.
- * @ThreadSafe
- */
-public class GzipUtils {
-
- private static final FileNameUtil fileNameUtil;
-
- static {
- // using LinkedHashMap so .tgz is preferred over .taz as
- // compressed extension of .tar as FileNameUtil will use the
- // first one found
- final Map<String, String> uncompressSuffix =
- new LinkedHashMap<>();
- uncompressSuffix.put(".tgz", ".tar");
- uncompressSuffix.put(".taz", ".tar");
- uncompressSuffix.put(".svgz", ".svg");
- uncompressSuffix.put(".cpgz", ".cpio");
- uncompressSuffix.put(".wmz", ".wmf");
- uncompressSuffix.put(".emz", ".emf");
- uncompressSuffix.put(".gz", "");
- uncompressSuffix.put(".z", "");
- uncompressSuffix.put("-gz", "");
- uncompressSuffix.put("-z", "");
- uncompressSuffix.put("_z", "");
- fileNameUtil = new FileNameUtil(uncompressSuffix, ".gz");
- }
-
- /** Private constructor to prevent instantiation of this utility class. */
- private GzipUtils() {
- }
-
- /**
- * Detects common gzip suffixes in the given filename.
- *
- * @param filename name of a file
- * @return {@code true} if the filename has a common gzip suffix,
- * {@code false} otherwise
- */
- public static boolean isCompressedFilename(final String filename) {
- return fileNameUtil.isCompressedFilename(filename);
- }
-
- /**
- * Maps the given name of a gzip-compressed file to the name that the
- * file should have after uncompression. Commonly used file type specific
- * suffixes like ".tgz" or ".svgz" are automatically detected and
- * correctly mapped. For example the name "package.tgz" is mapped to
- * "package.tar". And any filenames with the generic ".gz" suffix
- * (or any other generic gzip suffix) is mapped to a name without that
- * suffix. If no gzip suffix is detected, then the filename is returned
- * unmapped.
- *
- * @param filename name of a file
- * @return name of the corresponding uncompressed file
- */
- public static String getUncompressedFilename(final String filename) {
- return fileNameUtil.getUncompressedFilename(filename);
- }
-
- /**
- * Maps the given filename to the name that the file should have after
- * compression with gzip. Common file types with custom suffixes for
- * compressed versions are automatically detected and correctly mapped.
- * For example the name "package.tar" is mapped to "package.tgz". If no
- * custom mapping is applicable, then the default ".gz" suffix is appended
- * to the filename.
- *
- * @param filename name of a file
- * @return name of the corresponding compressed file
- */
- public static String getCompressedFilename(final String filename) {
- return fileNameUtil.getCompressedFilename(filename);
- }
-
-}
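The suffix mapping above is easiest to verify with the examples from the javadoc itself:

    import org.apache.commons.compress.compressors.gzip.GzipUtils;

    public class GzipNameDemo {
        public static void main(String[] args) {
            System.out.println(GzipUtils.getUncompressedFilename("package.tgz")); // package.tar
            System.out.println(GzipUtils.getCompressedFilename("package.tar"));   // package.tgz
            System.out.println(GzipUtils.isCompressedFilename("package.tgz"));    // true
        }
    }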
diff --git a/src/org/apache/commons/compress/compressors/gzip/package.html b/src/org/apache/commons/compress/compressors/gzip/package.html
deleted file mode 100644
index e18b50f2fa0..00000000000
--- a/src/org/apache/commons/compress/compressors/gzip/package.html
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-
-
- <p>Provides stream classes for compressing and decompressing
- streams using the GZip algorithm.
-
-
- <p>The classes in this package are wrappers around {@link
- java.util.zip.GZIPInputStream java.util.zip.GZIPInputStream} and
- {@link java.util.zip.GZIPOutputStream
- java.util.zip.GZIPOutputStream}.
-
-
diff --git a/src/org/apache/commons/compress/compressors/lz4/BlockLZ4CompressorInputStream.java b/src/org/apache/commons/compress/compressors/lz4/BlockLZ4CompressorInputStream.java
deleted file mode 100644
index a52dc6015cc..00000000000
--- a/src/org/apache/commons/compress/compressors/lz4/BlockLZ4CompressorInputStream.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lz4;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.compress.compressors.lz77support.AbstractLZ77CompressorInputStream;
-import org.apache.commons.compress.utils.ByteUtils;
-
-/**
- * CompressorInputStream for the LZ4 block format.
- *
- * @see <a href="http://lz4.github.io/lz4/lz4_Block_format.html">LZ4 Block Format Description</a>
- * @since 1.14
- * @NotThreadSafe
- */
-public class BlockLZ4CompressorInputStream extends AbstractLZ77CompressorInputStream {
-
- static final int WINDOW_SIZE = 1 << 16;
- static final int SIZE_BITS = 4;
- static final int BACK_REFERENCE_SIZE_MASK = (1 << SIZE_BITS) - 1;
- static final int LITERAL_SIZE_MASK = BACK_REFERENCE_SIZE_MASK << SIZE_BITS;
-
- /** Back-Reference-size part of the block starting byte. */
- private int nextBackReferenceSize;
-
- /** Current state of the stream */
- private State state = State.NO_BLOCK;
-
- /**
- * Creates a new LZ4 input stream.
- *
- * @param is
- * An InputStream to read compressed data from
- *
- * @throws IOException if reading fails
- */
- public BlockLZ4CompressorInputStream(final InputStream is) throws IOException {
- super(is, WINDOW_SIZE);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- switch (state) {
- case EOF:
- return -1;
- case NO_BLOCK: // NOSONAR - fallthrough intended
- readSizes();
- /*FALLTHROUGH*/
- case IN_LITERAL:
- int litLen = readLiteral(b, off, len);
- if (!hasMoreDataInBlock()) {
- state = State.LOOKING_FOR_BACK_REFERENCE;
- }
- return litLen > 0 ? litLen : read(b, off, len);
- case LOOKING_FOR_BACK_REFERENCE: // NOSONAR - fallthrough intended
- if (!initializeBackReference()) {
- state = State.EOF;
- return -1;
- }
- /*FALLTHROUGH*/
- case IN_BACK_REFERENCE:
- int backReferenceLen = readBackReference(b, off, len);
- if (!hasMoreDataInBlock()) {
- state = State.NO_BLOCK;
- }
- return backReferenceLen > 0 ? backReferenceLen : read(b, off, len);
- default:
- throw new IOException("Unknown stream state " + state);
- }
- }
-
- private void readSizes() throws IOException {
- int nextBlock = readOneByte();
- if (nextBlock == -1) {
- throw new IOException("Premature end of stream while looking for next block");
- }
- nextBackReferenceSize = nextBlock & BACK_REFERENCE_SIZE_MASK;
- long literalSizePart = (nextBlock & LITERAL_SIZE_MASK) >> SIZE_BITS;
- if (literalSizePart == BACK_REFERENCE_SIZE_MASK) {
- literalSizePart += readSizeBytes();
- }
- startLiteral(literalSizePart);
- state = State.IN_LITERAL;
- }
-
- private long readSizeBytes() throws IOException {
- long accum = 0;
- int nextByte;
- do {
- nextByte = readOneByte();
- if (nextByte == -1) {
- throw new IOException("Premature end of stream while parsing length");
- }
- accum += nextByte;
- } while (nextByte == 255);
- return accum;
- }
-
- /**
- * @return false if there is no more back-reference - this means this is the
- * last block of the stream.
- */
- private boolean initializeBackReference() throws IOException {
- int backReferenceOffset = 0;
- try {
- backReferenceOffset = (int) ByteUtils.fromLittleEndian(supplier, 2);
- } catch (IOException ex) {
- if (nextBackReferenceSize == 0) { // the last block has no back-reference
- return false;
- }
- throw ex;
- }
- long backReferenceSize = nextBackReferenceSize;
- if (nextBackReferenceSize == BACK_REFERENCE_SIZE_MASK) {
- backReferenceSize += readSizeBytes();
- }
- // minimal match length 4 is encoded as 0
- startBackReference(backReferenceOffset, backReferenceSize + 4);
- state = State.IN_BACK_REFERENCE;
- return true;
- }
-
- private enum State {
- NO_BLOCK, IN_LITERAL, LOOKING_FOR_BACK_REFERENCE, IN_BACK_REFERENCE, EOF
- }
-}
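readSizes() and readSizeBytes() above implement the LZ4 token layout: the high nibble of the block-starting byte is the literal count, the low nibble is the match length minus 4, and the nibble value 15 means additional length bytes follow (summed until a byte other than 255 appears). A worked example:

    public class Lz4TokenDemo {
        public static void main(String[] args) {
            int token = 0xF2;                     // example block-starting byte
            int literalLen = (token & 0xF0) >> 4; // 15: extension bytes follow
            int matchLen = (token & 0x0F) + 4;    // 2 + minimal match length 4 = 6
            literalLen += 255 + 10;               // extension bytes 255, 10 -> 280 literals
            System.out.println("literals=" + literalLen + " match=" + matchLen);
        }
    }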
diff --git a/src/org/apache/commons/compress/compressors/lz4/BlockLZ4CompressorOutputStream.java b/src/org/apache/commons/compress/compressors/lz4/BlockLZ4CompressorOutputStream.java
deleted file mode 100644
index 2cce3a1ea6c..00000000000
--- a/src/org/apache/commons/compress/compressors/lz4/BlockLZ4CompressorOutputStream.java
+++ /dev/null
@@ -1,505 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lz4;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Deque;
-import java.util.Iterator;
-import java.util.LinkedList;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-import org.apache.commons.compress.compressors.lz77support.LZ77Compressor;
-import org.apache.commons.compress.compressors.lz77support.Parameters;
-import org.apache.commons.compress.utils.ByteUtils;
-
-/**
- * CompressorOutputStream for the LZ4 block format.
- *
- * @see <a href="http://lz4.github.io/lz4/lz4_Block_format.html">LZ4 Block Format Description</a>
- * @since 1.14
- * @NotThreadSafe
- */
-public class BlockLZ4CompressorOutputStream extends CompressorOutputStream {
-
- private static final int MIN_BACK_REFERENCE_LENGTH = 4;
- private static final int MIN_OFFSET_OF_LAST_BACK_REFERENCE = 12;
-
- /*
-
- The LZ4 block format has a few properties that make it less
- straight-forward than one would hope:
-
- * literal blocks and back-references must come in pairs (except
- for the very last literal block), so consecutive literal
- blocks created by the compressor must be merged into a single
- block.
-
- * the start of a literal/back-reference pair contains the length
- of the back-reference (at least some part of it) so we can't
- start writing the literal before we know how long the next
- back-reference is going to be.
-
- * there are special rules for the final blocks
-
- > There are specific parsing rules to respect in order to remain
- > compatible with assumptions made by the decoder :
- >
- > 1. The last 5 bytes are always literals
- >
- > 2. The last match must start at least 12 bytes before end of
- > block. Consequently, a block with less than 13 bytes cannot be
- > compressed.
-
- which means any back-reference may need to get rewritten as a
- literal block unless we know the next block is at least of
- length 5 and the sum of this block's length and offset and the
- next block's length is at least twelve.
-
- */
-
- private final LZ77Compressor compressor;
- private final OutputStream os;
-
- // used in one-arg write method
- private final byte[] oneByte = new byte[1];
-
- private boolean finished = false;
-
- private Deque<Pair> pairs = new LinkedList<>();
- // keeps track of the last window-size bytes (64k) in order to be
- // able to expand back-references when needed
- private Deque<byte[]> expandedBlocks = new LinkedList<>();
-
- /**
- * Creates a new LZ4 output stream.
- *
- * @param os
- * An OutputStream to write the compressed data to
- *
- * @throws IOException if writing fails
- */
- public BlockLZ4CompressorOutputStream(final OutputStream os) throws IOException {
- this(os, createParameterBuilder().build());
- }
-
- /**
- * Creates a new LZ4 output stream.
- *
- * @param os
- * An OutputStream to write the compressed data to
- * @param params
- * The parameters to use for LZ77 compression.
- *
- * @throws IOException if writing fails
- */
- public BlockLZ4CompressorOutputStream(final OutputStream os, Parameters params) throws IOException {
- this.os = os;
- compressor = new LZ77Compressor(params,
- new LZ77Compressor.Callback() {
- @Override
- public void accept(LZ77Compressor.Block block) throws IOException {
- switch (block.getType()) {
- case LITERAL:
- addLiteralBlock((LZ77Compressor.LiteralBlock) block);
- break;
- case BACK_REFERENCE:
- addBackReference((LZ77Compressor.BackReference) block);
- break;
- case EOD:
- writeFinalLiteralBlock();
- break;
- }
- }
- });
- }
-
- @Override
- public void write(int b) throws IOException {
- oneByte[0] = (byte) (b & 0xff);
- write(oneByte);
- }
-
- @Override
- public void write(byte[] data, int off, int len) throws IOException {
- compressor.compress(data, off, len);
- }
-
- @Override
- public void close() throws IOException {
- finish();
- os.close();
- }
-
- /**
- * Compresses all remaining data and writes it to the stream,
- * doesn't close the underlying stream.
- * @throws IOException if an error occurs
- */
- public void finish() throws IOException {
- if (!finished) {
- compressor.finish();
- finished = true;
- }
- }
-
- /**
- * Adds some initial data to fill the window with.
- *
- * @param data the data to fill the window with.
- * @param off offset of real data into the array
- * @param len amount of data
- * @throws IllegalStateException if the stream has already started to write data
- * @see LZ77Compressor#prefill
- */
- public void prefill(byte[] data, int off, int len) {
- if (len > 0) {
- byte[] b = Arrays.copyOfRange(data, off, off + len);
- compressor.prefill(b);
- recordLiteral(b);
- }
- }
-
- private void addLiteralBlock(LZ77Compressor.LiteralBlock block) throws IOException {
- Pair last = writeBlocksAndReturnUnfinishedPair(block.getLength());
- recordLiteral(last.addLiteral(block));
- clearUnusedBlocksAndPairs();
- }
-
- private void addBackReference(LZ77Compressor.BackReference block) throws IOException {
- Pair last = writeBlocksAndReturnUnfinishedPair(block.getLength());
- last.setBackReference(block);
- recordBackReference(block);
- clearUnusedBlocksAndPairs();
- }
-
- private Pair writeBlocksAndReturnUnfinishedPair(int length) throws IOException {
- writeWritablePairs(length);
- Pair last = pairs.peekLast();
- if (last == null || last.hasBackReference()) {
- last = new Pair();
- pairs.addLast(last);
- }
- return last;
- }
-
- private void recordLiteral(byte[] b) {
- expandedBlocks.addFirst(b);
- }
-
- private void clearUnusedBlocksAndPairs() {
- clearUnusedBlocks();
- clearUnusedPairs();
- }
-
- private void clearUnusedBlocks() {
- int blockLengths = 0;
- int blocksToKeep = 0;
- for (byte[] b : expandedBlocks) {
- blocksToKeep++;
- blockLengths += b.length;
- if (blockLengths >= BlockLZ4CompressorInputStream.WINDOW_SIZE) {
- break;
- }
- }
- final int size = expandedBlocks.size();
- for (int i = blocksToKeep; i < size; i++) {
- expandedBlocks.removeLast();
- }
- }
-
- private void recordBackReference(LZ77Compressor.BackReference block) {
- expandedBlocks.addFirst(expand(block.getOffset(), block.getLength()));
- }
-
- private byte[] expand(final int offset, final int length) {
- byte[] expanded = new byte[length];
- if (offset == 1) { // surprisingly common special case
- byte[] block = expandedBlocks.peekFirst();
- byte b = block[block.length - 1];
- if (b != 0) { // the fresh array contains 0s anyway
- Arrays.fill(expanded, b);
- }
- } else {
- expandFromList(expanded, offset, length);
- }
- return expanded;
- }
-
- private void expandFromList(final byte[] expanded, int offset, int length) {
- int offsetRemaining = offset;
- int lengthRemaining = length;
- int writeOffset = 0;
- while (lengthRemaining > 0) {
- // find block that contains offsetRemaining
- byte[] block = null;
- int copyLen, copyOffset;
- if (offsetRemaining > 0) {
- int blockOffset = 0;
- for (byte[] b : expandedBlocks) {
- if (b.length + blockOffset >= offsetRemaining) {
- block = b;
- break;
- }
- blockOffset += b.length;
- }
- if (block == null) {
- // should not be possible
- throw new IllegalStateException("failed to find a block containing offset " + offset);
- }
- copyOffset = blockOffset + block.length - offsetRemaining;
- copyLen = Math.min(lengthRemaining, block.length - copyOffset);
- } else {
- // offsetRemaining is negative or 0 and points into the expanded bytes
- block = expanded;
- copyOffset = -offsetRemaining;
- copyLen = Math.min(lengthRemaining, writeOffset + offsetRemaining);
- }
- System.arraycopy(block, copyOffset, expanded, writeOffset, copyLen);
- offsetRemaining -= copyLen;
- lengthRemaining -= copyLen;
- writeOffset += copyLen;
- }
- }
-
- private void clearUnusedPairs() {
- int pairLengths = 0;
- int pairsToKeep = 0;
- for (Iterator<Pair> it = pairs.descendingIterator(); it.hasNext(); ) {
- Pair p = it.next();
- pairsToKeep++;
- pairLengths += p.length();
- if (pairLengths >= BlockLZ4CompressorInputStream.WINDOW_SIZE) {
- break;
- }
- }
- final int size = pairs.size();
- for (int i = pairsToKeep; i < size; i++) {
- Pair p = pairs.peekFirst();
- if (p.hasBeenWritten()) {
- pairs.removeFirst();
- } else {
- break;
- }
- }
- }
-
- private void writeFinalLiteralBlock() throws IOException {
- rewriteLastPairs();
- for (Pair p : pairs) {
- if (!p.hasBeenWritten()) {
- p.writeTo(os);
- }
- }
- pairs.clear();
- }
-
- private void writeWritablePairs(int lengthOfBlocksAfterLastPair) throws IOException {
- int unwrittenLength = lengthOfBlocksAfterLastPair;
- for (Iterator<Pair> it = pairs.descendingIterator(); it.hasNext(); ) {
- Pair p = it.next();
- if (p.hasBeenWritten()) {
- break;
- }
- unwrittenLength += p.length();
- }
- for (Pair p : pairs) {
- if (p.hasBeenWritten()) {
- continue;
- }
- unwrittenLength -= p.length();
- if (p.canBeWritten(unwrittenLength)) {
- p.writeTo(os);
- } else {
- break;
- }
- }
- }
-
- private void rewriteLastPairs() {
- LinkedList<Pair> lastPairs = new LinkedList<>();
- LinkedList<Integer> pairLength = new LinkedList<>();
- int offset = 0;
- for (Iterator<Pair> it = pairs.descendingIterator(); it.hasNext(); ) {
- Pair p = it.next();
- if (p.hasBeenWritten()) {
- break;
- }
- int len = p.length();
- pairLength.addFirst(len);
- lastPairs.addFirst(p);
- offset += len;
- if (offset >= MIN_OFFSET_OF_LAST_BACK_REFERENCE) {
- break;
- }
- }
- for (Pair p : lastPairs) {
- pairs.remove(p);
- }
- // lastPairs may contain between one and four Pairs:
- // * the last pair may be a one byte literal
- // * all other Pairs contain a back-reference which must be four bytes long at minimum
- // we could merge them all into a single literal block but
- // this may harm compression. For example compressing
- // "bla.tar" from our tests yields a last block containing a
- // back-reference of length > 2k and we'd end up with a last
- // literal of that size rather than a 2k back-reference and a
- // 12 byte literal at the end.
-
- // Instead we merge all but the first of lastPairs into a new
- // literal-only Pair "replacement" and look at the
- // back-reference in the first of lastPairs and see if we can
- // split it. We can split it if it is longer than 16 -
- // replacement.length (i.e. the minimal length of four is kept
- // while making sure the last literal is at least twelve bytes
- // long). If we can't split it, we expand the first of the pairs
- // as well.
-
- // this is not optimal, we could get better compression
- // results with more complex approaches as the last literal
- // only needs to be five bytes long if the previous
- // back-reference has an offset big enough
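- // Worked example with the constants above: if the trailing unwritten
- // pairs after the first one hold toExpand = 7 bytes, then stillNeeded =
- // 12 - 7 = 5 and the first pair's back-reference can be split if its
- // length is at least 4 + 5 = 9 - it keeps length brLen - 5 and the
- // last five bytes are expanded into the final literal instead.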
-
- final int lastPairsSize = lastPairs.size();
- int toExpand = 0;
- for (int i = 1; i < lastPairsSize; i++) {
- toExpand += pairLength.get(i);
- }
- Pair replacement = new Pair();
- if (toExpand > 0) {
- replacement.prependLiteral(expand(toExpand, toExpand));
- }
- Pair splitCandidate = lastPairs.get(0);
- int stillNeeded = MIN_OFFSET_OF_LAST_BACK_REFERENCE - toExpand;
- int brLen = splitCandidate.hasBackReference() ? splitCandidate.backReferenceLength() : 0;
- if (splitCandidate.hasBackReference() && brLen >= MIN_BACK_REFERENCE_LENGTH + stillNeeded) {
- replacement.prependLiteral(expand(toExpand + stillNeeded, stillNeeded));
- pairs.add(splitCandidate.splitWithNewBackReferenceLengthOf(brLen - stillNeeded));
- } else {
- if (splitCandidate.hasBackReference()) {
- replacement.prependLiteral(expand(toExpand + brLen, brLen));
- }
- splitCandidate.prependTo(replacement);
- }
- pairs.add(replacement);
- }
-
- /**
- * Returns a builder correctly configured for the LZ4 algorithm.
- * @return a builder correctly configured for the LZ4 algorithm
- */
- public static Parameters.Builder createParameterBuilder() {
- int maxLen = BlockLZ4CompressorInputStream.WINDOW_SIZE - 1;
- return Parameters.builder(BlockLZ4CompressorInputStream.WINDOW_SIZE)
- .withMinBackReferenceLength(MIN_BACK_REFERENCE_LENGTH)
- .withMaxBackReferenceLength(maxLen)
- .withMaxOffset(maxLen)
- .withMaxLiteralLength(maxLen);
- }
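-
- /*
- * Illustrative tweak (a sketch, not a recommendation; "sink" is a
- * placeholder OutputStream): the returned builder can be tightened
- * further before building, using the same setters exercised above,
- * e.g. to cap back-reference lengths:
- *
- *   Parameters params = BlockLZ4CompressorOutputStream.createParameterBuilder()
- *       .withMaxBackReferenceLength(255)
- *       .build();
- *   OutputStream lz4 = new BlockLZ4CompressorOutputStream(sink, params);
- */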
-
- static final class Pair {
- private final Deque<byte[]> literals = new LinkedList<>();
- private int brOffset, brLength;
- private boolean written;
-
- private void prependLiteral(byte[] data) {
- literals.addFirst(data);
- }
- byte[] addLiteral(LZ77Compressor.LiteralBlock block) {
- byte[] copy = Arrays.copyOfRange(block.getData(), block.getOffset(),
- block.getOffset() + block.getLength());
- literals.add(copy);
- return copy;
- }
- void setBackReference(LZ77Compressor.BackReference block) {
- if (hasBackReference()) {
- throw new IllegalStateException();
- }
- brOffset = block.getOffset();
- brLength = block.getLength();
- }
- boolean hasBackReference() {
- return brOffset > 0;
- }
- boolean canBeWritten(int lengthOfBlocksAfterThisPair) {
- return hasBackReference()
- && lengthOfBlocksAfterThisPair >= MIN_OFFSET_OF_LAST_BACK_REFERENCE + MIN_BACK_REFERENCE_LENGTH;
- }
- int length() {
- return literalLength() + brLength;
- }
- private boolean hasBeenWritten() {
- return written;
- }
- void writeTo(OutputStream out) throws IOException {
- int litLength = literalLength();
- out.write(lengths(litLength, brLength));
- if (litLength >= BlockLZ4CompressorInputStream.BACK_REFERENCE_SIZE_MASK) {
- writeLength(litLength - BlockLZ4CompressorInputStream.BACK_REFERENCE_SIZE_MASK, out);
- }
- for (byte[] b : literals) {
- out.write(b);
- }
- if (hasBackReference()) {
- ByteUtils.toLittleEndian(out, brOffset, 2);
- if (brLength - MIN_BACK_REFERENCE_LENGTH >= BlockLZ4CompressorInputStream.BACK_REFERENCE_SIZE_MASK) {
- writeLength(brLength - MIN_BACK_REFERENCE_LENGTH
- - BlockLZ4CompressorInputStream.BACK_REFERENCE_SIZE_MASK, out);
- }
- }
- written = true;
- }
- private int literalLength() {
- int length = 0;
- for (byte[] b : literals) {
- length += b.length;
- }
- return length;
- }
- private static int lengths(int litLength, int brLength) {
- int l = litLength < 15 ? litLength : 15;
- int br = brLength < 4 ? 0 : (brLength < 19 ? brLength - 4 : 15);
- return (l << BlockLZ4CompressorInputStream.SIZE_BITS) | br;
- }
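- // Worked example for lengths() above, assuming SIZE_BITS is the nibble
- // width of 4: litLength = 3 and brLength = 6 yield (3 << 4) | (6 - 4)
- // = 0x32 - the literal count in the high nibble, the back-reference
- // length minus the implied minimum of 4 in the low nibble.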
- private static void writeLength(int length, OutputStream out) throws IOException {
- while (length >= 255) {
- out.write(255);
- length -= 255;
- }
- out.write(length);
- }
- private int backReferenceLength() {
- return brLength;
- }
- private void prependTo(Pair other) {
- Iterator<byte[]> listBackwards = literals.descendingIterator();
- while (listBackwards.hasNext()) {
- other.prependLiteral(listBackwards.next());
- }
- }
- private Pair splitWithNewBackReferenceLengthOf(int newBackReferenceLength) {
- Pair p = new Pair();
- p.literals.addAll(literals);
- p.brOffset = brOffset;
- p.brLength = newBackReferenceLength;
- return p;
- }
- }
-}
diff --git a/src/org/apache/commons/compress/compressors/lz4/FramedLZ4CompressorInputStream.java b/src/org/apache/commons/compress/compressors/lz4/FramedLZ4CompressorInputStream.java
deleted file mode 100644
index c4c4f492a96..00000000000
--- a/src/org/apache/commons/compress/compressors/lz4/FramedLZ4CompressorInputStream.java
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lz4;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.BoundedInputStream;
-import org.apache.commons.compress.utils.ByteUtils;
-import org.apache.commons.compress.utils.ChecksumCalculatingInputStream;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * CompressorInputStream for the LZ4 frame format.
- *
- * <p>Based on the "spec" in the version "1.5.1 (31/03/2015)"</p>
- *
- * @see LZ4 Frame Format Description
- * @since 1.14
- * @NotThreadSafe
- */
-public class FramedLZ4CompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
-
- // used by FramedLZ4CompressorOutputStream as well
- static final byte[] LZ4_SIGNATURE = new byte[] { //NOSONAR
- 4, 0x22, 0x4d, 0x18
- };
- private static final byte[] SKIPPABLE_FRAME_TRAILER = new byte[] {
- 0x2a, 0x4d, 0x18
- };
- private static final byte SKIPPABLE_FRAME_PREFIX_BYTE_MASK = 0x50;
-
- static final int VERSION_MASK = 0xC0;
- static final int SUPPORTED_VERSION = 0x40;
- static final int BLOCK_INDEPENDENCE_MASK = 0x20;
- static final int BLOCK_CHECKSUM_MASK = 0x10;
- static final int CONTENT_SIZE_MASK = 0x08;
- static final int CONTENT_CHECKSUM_MASK = 0x04;
- static final int BLOCK_MAX_SIZE_MASK = 0x70;
- static final int UNCOMPRESSED_FLAG_MASK = 0x80000000;
-
- // used in no-arg read method
- private final byte[] oneByte = new byte[1];
-
- private final ByteUtils.ByteSupplier supplier = new ByteUtils.ByteSupplier() {
- @Override
- public int getAsByte() throws IOException {
- return readOneByte();
- }
- };
-
- private final CountingInputStream in;
- private final boolean decompressConcatenated;
-
- private boolean expectBlockChecksum;
- private boolean expectBlockDependency;
- private boolean expectContentSize;
- private boolean expectContentChecksum;
-
- private InputStream currentBlock;
- private boolean endReached, inUncompressed;
-
- // used for frame header checksum and content checksum, if present
- private final XXHash32 contentHash = new XXHash32();
-
- // used for block checksum, if present
- private final XXHash32 blockHash = new XXHash32();
-
- // only created if the frame doesn't set the block independence flag
- private byte[] blockDependencyBuffer;
-
- /**
- * Creates a new input stream that decompresses streams compressed
- * using the LZ4 frame format and stops after decompressing the
- * first frame.
- * @param in the InputStream from which to read the compressed data
- * @throws IOException if reading fails
- */
- public FramedLZ4CompressorInputStream(InputStream in) throws IOException {
- this(in, false);
- }
-
- /**
- * Creates a new input stream that decompresses streams compressed
- * using the LZ4 frame format.
- * @param in the InputStream from which to read the compressed data
- * @param decompressConcatenated if true, decompress until the end
- * of the input; if false, stop after the first LZ4 frame
- * and leave the input position to point to the next byte
- * after the frame stream
- * @throws IOException if reading fails
- */
- public FramedLZ4CompressorInputStream(InputStream in, boolean decompressConcatenated) throws IOException {
- this.in = new CountingInputStream(in);
- this.decompressConcatenated = decompressConcatenated;
- init(true);
- }
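-
- /*
- * Usage sketch for this class (illustrative; the path is a placeholder):
- *
- *   try (InputStream fin = java.nio.file.Files.newInputStream(
- *            java.nio.file.Paths.get("archive.tar.lz4"));
- *        FramedLZ4CompressorInputStream lz4 = new FramedLZ4CompressorInputStream(fin)) {
- *       byte[] buf = new byte[8192];
- *       for (int n = lz4.read(buf); n != -1; n = lz4.read(buf)) {
- *           // consume buf[0..n)
- *       }
- *   }
- */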
-
- /** {@inheritDoc} */
- @Override
- public int read() throws IOException {
- return read(oneByte, 0, 1) == -1 ? -1 : oneByte[0] & 0xFF;
- }
-
- /** {@inheritDoc} */
- @Override
- public void close() throws IOException {
- if (currentBlock != null) {
- currentBlock.close();
- currentBlock = null;
- }
- in.close();
- }
-
- /** {@inheritDoc} */
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- if (endReached) {
- return -1;
- }
- int r = readOnce(b, off, len);
- if (r == -1) {
- nextBlock();
- if (!endReached) {
- r = readOnce(b, off, len);
- }
- }
- if (r != -1) {
- if (expectBlockDependency) {
- appendToBlockDependencyBuffer(b, off, r);
- }
- if (expectContentChecksum) {
- contentHash.update(b, off, r);
- }
- }
- return r;
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return in.getBytesRead();
- }
-
- private void init(boolean firstFrame) throws IOException {
- if (readSignature(firstFrame)) {
- readFrameDescriptor();
- nextBlock();
- }
- }
-
- private boolean readSignature(boolean firstFrame) throws IOException {
- String garbageMessage = firstFrame ? "Not a LZ4 frame stream" : "LZ4 frame stream followed by garbage";
- final byte[] b = new byte[4];
- int read = IOUtils.readFully(in, b);
- count(read);
- if (0 == read && !firstFrame) {
- // good LZ4 frame and nothing after it
- endReached = true;
- return false;
- }
- if (4 != read) {
- throw new IOException(garbageMessage);
- }
-
- read = skipSkippableFrame(b);
- if (0 == read && !firstFrame) {
- // good LZ4 frame with only some skippable frames after it
- endReached = true;
- return false;
- }
- if (4 != read || !matches(b, 4)) {
- throw new IOException(garbageMessage);
- }
- return true;
- }
-
- private void readFrameDescriptor() throws IOException {
- int flags = readOneByte();
- if (flags == -1) {
- throw new IOException("Premature end of stream while reading frame flags");
- }
- contentHash.update(flags);
- if ((flags & VERSION_MASK) != SUPPORTED_VERSION) {
- throw new IOException("Unsupported version " + (flags >> 6));
- }
- expectBlockDependency = (flags & BLOCK_INDEPENDENCE_MASK) == 0;
- if (expectBlockDependency) {
- if (blockDependencyBuffer == null) {
- blockDependencyBuffer = new byte[BlockLZ4CompressorInputStream.WINDOW_SIZE];
- }
- } else {
- blockDependencyBuffer = null;
- }
- expectBlockChecksum = (flags & BLOCK_CHECKSUM_MASK) != 0;
- expectContentSize = (flags & CONTENT_SIZE_MASK) != 0;
- expectContentChecksum = (flags & CONTENT_CHECKSUM_MASK) != 0;
- int bdByte = readOneByte();
- if (bdByte == -1) { // max size is irrelevant for this implementation
- throw new IOException("Premature end of stream while reading frame BD byte");
- }
- contentHash.update(bdByte);
- if (expectContentSize) { // for now we don't care, contains the uncompressed size
- byte[] contentSize = new byte[8];
- int skipped = IOUtils.readFully(in, contentSize);
- count(skipped);
- if (8 != skipped) {
- throw new IOException("Premature end of stream while reading content size");
- }
- contentHash.update(contentSize, 0, contentSize.length);
- }
- int headerHash = readOneByte();
- if (headerHash == -1) { // partial hash of header.
- throw new IOException("Premature end of stream while reading frame header checksum");
- }
- int expectedHash = (int) ((contentHash.getValue() >> 8) & 0xff);
- contentHash.reset();
- if (headerHash != expectedHash) {
- throw new IOException("frame header checksum mismatch.");
- }
- }
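-
- // Example frame descriptor for the method above (the value is the
- // default emitted by the lz4 command line tool, stated here as an
- // assumption): flags = 0x64 = 0110 0100 means version 01, block
- // independence set and content checksum set; the following BD byte
- // carries the maximum block size in bits 4-6 (BLOCK_MAX_SIZE_MASK).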
-
- private void nextBlock() throws IOException {
- maybeFinishCurrentBlock();
- long len = ByteUtils.fromLittleEndian(supplier, 4);
- boolean uncompressed = (len & UNCOMPRESSED_FLAG_MASK) != 0;
- int realLen = (int) (len & (~UNCOMPRESSED_FLAG_MASK));
- if (realLen == 0) {
- verifyContentChecksum();
- if (!decompressConcatenated) {
- endReached = true;
- } else {
- init(false);
- }
- return;
- }
- InputStream capped = new BoundedInputStream(in, realLen);
- if (expectBlockChecksum) {
- capped = new ChecksumCalculatingInputStream(blockHash, capped);
- }
- if (uncompressed) {
- inUncompressed = true;
- currentBlock = capped;
- } else {
- inUncompressed = false;
- BlockLZ4CompressorInputStream s = new BlockLZ4CompressorInputStream(capped);
- if (expectBlockDependency) {
- s.prefill(blockDependencyBuffer);
- }
- currentBlock = s;
- }
- }
-
- private void maybeFinishCurrentBlock() throws IOException {
- if (currentBlock != null) {
- currentBlock.close();
- currentBlock = null;
- if (expectBlockChecksum) {
- verifyChecksum(blockHash, "block");
- blockHash.reset();
- }
- }
- }
-
- private void verifyContentChecksum() throws IOException {
- if (expectContentChecksum) {
- verifyChecksum(contentHash, "content");
- }
- contentHash.reset();
- }
-
- private void verifyChecksum(XXHash32 hash, String kind) throws IOException {
- byte[] checksum = new byte[4];
- int read = IOUtils.readFully(in, checksum);
- count(read);
- if (4 != read) {
- throw new IOException("Premature end of stream while reading " + kind + " checksum");
- }
- long expectedHash = hash.getValue();
- if (expectedHash != ByteUtils.fromLittleEndian(checksum)) {
- throw new IOException(kind + " checksum mismatch.");
- }
- }
-
- private int readOneByte() throws IOException {
- final int b = in.read();
- if (b != -1) {
- count(1);
- return b & 0xFF;
- }
- return -1;
- }
-
- private int readOnce(byte[] b, int off, int len) throws IOException {
- if (inUncompressed) {
- int cnt = currentBlock.read(b, off, len);
- count(cnt);
- return cnt;
- }
- BlockLZ4CompressorInputStream l = (BlockLZ4CompressorInputStream) currentBlock;
- long before = l.getBytesRead();
- int cnt = currentBlock.read(b, off, len);
- count(l.getBytesRead() - before);
- return cnt;
- }
-
- private static boolean isSkippableFrameSignature(byte[] b) {
- if ((b[0] & SKIPPABLE_FRAME_PREFIX_BYTE_MASK) != SKIPPABLE_FRAME_PREFIX_BYTE_MASK) {
- return false;
- }
- for (int i = 1; i < 4; i++) {
- if (b[i] != SKIPPABLE_FRAME_TRAILER[i - 1]) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Skips over the contents of a skippable frame as well as
- * skippable frames following it.
- *
- * <p>It then tries to read four more bytes which are supposed to
- * hold an LZ4 signature and returns the number of bytes read
- * while storing the bytes in the given array.
- */
- private int skipSkippableFrame(byte[] b) throws IOException {
- int read = 4;
- while (read == 4 && isSkippableFrameSignature(b)) {
- long len = ByteUtils.fromLittleEndian(supplier, 4);
- long skipped = IOUtils.skip(in, len);
- count(skipped);
- if (len != skipped) {
- throw new IOException("Premature end of stream while skipping frame");
- }
- read = IOUtils.readFully(in, b);
- count(read);
- }
- return read;
- }
-
- private void appendToBlockDependencyBuffer(final byte[] b, final int off, int len) {
- len = Math.min(len, blockDependencyBuffer.length);
- if (len > 0) {
- int keep = blockDependencyBuffer.length - len;
- if (keep > 0) {
- // move last keep bytes towards the start of the buffer
- System.arraycopy(blockDependencyBuffer, len, blockDependencyBuffer, 0, keep);
- }
- // append new data
- System.arraycopy(b, off, blockDependencyBuffer, keep, len);
- }
- }
-
- /**
- * Checks if the signature matches what is expected for a .lz4 file.
- *
- * <p>.lz4 files start with a four byte signature.</p>
- *
- * @param signature the bytes to check
- * @param length the number of bytes to check
- * @return true if this is a .lz4 stream, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
-
- if (length < LZ4_SIGNATURE.length) {
- return false;
- }
-
- byte[] shortenedSig = signature;
- if (signature.length > LZ4_SIGNATURE.length) {
- shortenedSig = new byte[LZ4_SIGNATURE.length];
- System.arraycopy(signature, 0, shortenedSig, 0, LZ4_SIGNATURE.length);
- }
-
- return Arrays.equals(shortenedSig, LZ4_SIGNATURE);
- }
-}
diff --git a/src/org/apache/commons/compress/compressors/lz4/FramedLZ4CompressorOutputStream.java b/src/org/apache/commons/compress/compressors/lz4/FramedLZ4CompressorOutputStream.java
deleted file mode 100644
index 0b11eff234c..00000000000
--- a/src/org/apache/commons/compress/compressors/lz4/FramedLZ4CompressorOutputStream.java
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lz4;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-import org.apache.commons.compress.utils.ByteUtils;
-
-/**
- * CompressorOutputStream for the LZ4 frame format.
- *
- * <p>Based on the "spec" in the version "1.5.1 (31/03/2015)"</p>
- *
- * @see LZ4 Frame Format Description
- * @since 1.14
- * @NotThreadSafe
- */
-public class FramedLZ4CompressorOutputStream extends CompressorOutputStream {
-
- private static final byte[] END_MARK = new byte[4];
-
- // used in one-arg write method
- private final byte[] oneByte = new byte[1];
-
- private final byte[] blockData;
- private final OutputStream out;
- private final Parameters params;
- private boolean finished = false;
- private int currentIndex = 0;
-
- // used for frame header checksum and content checksum, if requested
- private final XXHash32 contentHash = new XXHash32();
- // used for block checksum, if requested
- private final XXHash32 blockHash;
-
- // only created if the config requires block dependency
- private byte[] blockDependencyBuffer;
- private int collectedBlockDependencyBytes;
-
- /**
- * The block sizes supported by the format.
- */
- public enum BlockSize {
- /** Block size of 64K */
- K64(64 * 1024, 4),
- /** Block size of 256K */
- K256(256 * 1024, 5),
- /** Block size of 1M */
- M1(1024 * 1024, 6),
- /** Block size of 4M */
- M4(4096 * 1024, 7);
-
- private final int size, index;
- BlockSize(int size, int index) {
- this.size = size;
- this.index = index;
- }
- int getSize() {
- return size;
- }
- int getIndex() {
- return index;
- }
- }
-
- /**
- * Parameters of the LZ4 frame format.
- */
- public static class Parameters {
- private final BlockSize blockSize;
- private final boolean withContentChecksum, withBlockChecksum, withBlockDependency;
- private final org.apache.commons.compress.compressors.lz77support.Parameters lz77params;
-
- /**
- * The default parameters of 4M block size, enabled content
- * checksum, disabled block checksums and independent blocks.
- *
- * <p>This matches the defaults of the lz4 command line utility.</p>
- */
- public static final Parameters DEFAULT = new Parameters(BlockSize.M4, true, false, false);
-
- /**
- * Sets up a custom block size for the LZ4 stream but
- * otherwise uses the defaults of enabled content checksum,
- * disabled block checksums and independent blocks.
- * @param blockSize the size of a single block.
- */
- public Parameters(BlockSize blockSize) {
- this(blockSize, true, false, false);
- }
- /**
- * Sets up a custom block size for the LZ4 stream but
- * otherwise uses the defaults of enabled content checksum,
- * disabled block checksums and independent blocks.
- * @param blockSize the size of a single block.
- * @param lz77params parameters used to fine-tune compression,
- * in particular to balance compression ratio vs compression
- * speed.
- */
- public Parameters(BlockSize blockSize,
- org.apache.commons.compress.compressors.lz77support.Parameters lz77params) {
- this(blockSize, true, false, false, lz77params);
- }
- /**
- * Sets up custom parameters for the LZ4 stream.
- * @param blockSize the size of a single block.
- * @param withContentChecksum whether to write a content checksum
- * @param withBlockChecksum whether to write a block checksum.
- * Note that block checksums are not supported by the lz4
- * command line utility
- * @param withBlockDependency whether a block may depend on
- * the content of a previous block. Enabling this may improve
- * compression ratio but makes it impossible to decompress the
- * output in parallel.
- */
- public Parameters(BlockSize blockSize, boolean withContentChecksum, boolean withBlockChecksum,
- boolean withBlockDependency) {
- this(blockSize, withContentChecksum, withBlockChecksum, withBlockDependency,
- BlockLZ4CompressorOutputStream.createParameterBuilder().build());
- }
-
- /**
- * Sets up custom parameters for the LZ4 stream.
- * @param blockSize the size of a single block.
- * @param withContentChecksum whether to write a content checksum
- * @param withBlockChecksum whether to write a block checksum.
- * Note that block checksums are not supported by the lz4
- * command line utility
- * @param withBlockDependency whether a block may depend on
- * the content of a previous block. Enabling this may improve
- * compression ratio but makes it impossible to decompress the
- * output in parallel.
- * @param lz77params parameters used to fine-tune compression,
- * in particular to balance compression ratio vs compression
- * speed.
- */
- public Parameters(BlockSize blockSize, boolean withContentChecksum, boolean withBlockChecksum,
- boolean withBlockDependency,
- org.apache.commons.compress.compressors.lz77support.Parameters lz77params) {
- this.blockSize = blockSize;
- this.withContentChecksum = withContentChecksum;
- this.withBlockChecksum = withBlockChecksum;
- this.withBlockDependency = withBlockDependency;
- this.lz77params = lz77params;
- }
-
- @Override
- public String toString() {
- return "LZ4 Parameters with BlockSize " + blockSize + ", withContentChecksum " + withContentChecksum
- + ", withBlockChecksum " + withBlockChecksum + ", withBlockDependency " + withBlockDependency;
- }
- }
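-
- /*
- * Illustrative configuration (a sketch): 64K blocks with block
- * dependency enabled for a better ratio on small inputs, at the price
- * of losing the ability to decompress blocks in parallel:
- *
- *   Parameters params = new Parameters(BlockSize.K64, true, false, true);
- */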
-
- /**
- * Constructs a new output stream that compresses data using the
- * LZ4 frame format using the default block size of 4MB.
- * @param out the OutputStream to which to write the compressed data
- * @throws IOException if writing the signature fails
- */
- public FramedLZ4CompressorOutputStream(OutputStream out) throws IOException {
- this(out, Parameters.DEFAULT);
- }
-
- /**
- * Constructs a new output stream that compresses data using the
- * LZ4 frame format using the given block size.
- * @param out the OutputStream to which to write the compressed data
- * @param params the parameters to use
- * @throws IOException if writing the signature fails
- */
- public FramedLZ4CompressorOutputStream(OutputStream out, Parameters params) throws IOException {
- this.params = params;
- blockData = new byte[params.blockSize.getSize()];
- this.out = out;
- blockHash = params.withBlockChecksum ? new XXHash32() : null;
- out.write(FramedLZ4CompressorInputStream.LZ4_SIGNATURE);
- writeFrameDescriptor();
- blockDependencyBuffer = params.withBlockDependency
- ? new byte[BlockLZ4CompressorInputStream.WINDOW_SIZE]
- : null;
- }
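-
- /*
- * Usage sketch for this class (illustrative; the path is a placeholder
- * and "uncompressed" stands for whatever should be written):
- *
- *   byte[] uncompressed = new byte[1234];
- *   try (OutputStream fos = java.nio.file.Files.newOutputStream(
- *            java.nio.file.Paths.get("archive.tar.lz4"));
- *        FramedLZ4CompressorOutputStream lz4 = new FramedLZ4CompressorOutputStream(fos)) {
- *       lz4.write(uncompressed);
- *   }
- */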
-
- @Override
- public void write(int b) throws IOException {
- oneByte[0] = (byte) (b & 0xff);
- write(oneByte);
- }
-
- @Override
- public void write(byte[] data, int off, int len) throws IOException {
- if (params.withContentChecksum) {
- contentHash.update(data, off, len);
- }
- if (currentIndex + len > blockData.length) {
- flushBlock();
- while (len > blockData.length) {
- System.arraycopy(data, off, blockData, 0, blockData.length);
- off += blockData.length;
- len -= blockData.length;
- currentIndex = blockData.length;
- flushBlock();
- }
- }
- System.arraycopy(data, off, blockData, currentIndex, len);
- currentIndex += len;
- }
-
- @Override
- public void close() throws IOException {
- finish();
- out.close();
- }
-
- /**
- * Compresses all remaining data and writes it to the stream,
- * doesn't close the underlying stream.
- * @throws IOException if an error occurs
- */
- public void finish() throws IOException {
- if (!finished) {
- if (currentIndex > 0) {
- flushBlock();
- }
- writeTrailer();
- finished = true;
- }
- }
-
- private void writeFrameDescriptor() throws IOException {
- int flags = FramedLZ4CompressorInputStream.SUPPORTED_VERSION;
- if (!params.withBlockDependency) {
- flags |= FramedLZ4CompressorInputStream.BLOCK_INDEPENDENCE_MASK;
- }
- if (params.withContentChecksum) {
- flags |= FramedLZ4CompressorInputStream.CONTENT_CHECKSUM_MASK;
- }
- if (params.withBlockChecksum) {
- flags |= FramedLZ4CompressorInputStream.BLOCK_CHECKSUM_MASK;
- }
- out.write(flags);
- contentHash.update(flags);
- int bd = (params.blockSize.getIndex() << 4) & FramedLZ4CompressorInputStream.BLOCK_MAX_SIZE_MASK;
- out.write(bd);
- contentHash.update(bd);
- out.write((int) ((contentHash.getValue() >> 8) & 0xff));
- contentHash.reset();
- }
-
- private void flushBlock() throws IOException {
- final boolean withBlockDependency = params.withBlockDependency;
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- try (BlockLZ4CompressorOutputStream o = new BlockLZ4CompressorOutputStream(baos, params.lz77params)) {
- if (withBlockDependency) {
- o.prefill(blockDependencyBuffer, blockDependencyBuffer.length - collectedBlockDependencyBytes,
- collectedBlockDependencyBytes);
- }
- o.write(blockData, 0, currentIndex);
- }
- if (withBlockDependency) {
- appendToBlockDependencyBuffer(blockData, 0, currentIndex);
- }
- byte[] b = baos.toByteArray();
- if (b.length > currentIndex) { // compression increased size, maybe beyond blocksize
- ByteUtils.toLittleEndian(out, currentIndex | FramedLZ4CompressorInputStream.UNCOMPRESSED_FLAG_MASK,
- 4);
- out.write(blockData, 0, currentIndex);
- if (params.withBlockChecksum) {
- blockHash.update(blockData, 0, currentIndex);
- }
- } else {
- ByteUtils.toLittleEndian(out, b.length, 4);
- out.write(b);
- if (params.withBlockChecksum) {
- blockHash.update(b, 0, b.length);
- }
- }
- if (params.withBlockChecksum) {
- ByteUtils.toLittleEndian(out, blockHash.getValue(), 4);
- blockHash.reset();
- }
- currentIndex = 0;
- }
-
- private void writeTrailer() throws IOException {
- out.write(END_MARK);
- if (params.withContentChecksum) {
- ByteUtils.toLittleEndian(out, contentHash.getValue(), 4);
- }
- }
-
- private void appendToBlockDependencyBuffer(final byte[] b, final int off, int len) {
- len = Math.min(len, blockDependencyBuffer.length);
- if (len > 0) {
- int keep = blockDependencyBuffer.length - len;
- if (keep > 0) {
- // move last keep bytes towards the start of the buffer
- System.arraycopy(blockDependencyBuffer, len, blockDependencyBuffer, 0, keep);
- }
- // append new data
- System.arraycopy(b, off, blockDependencyBuffer, keep, len);
- collectedBlockDependencyBytes = Math.min(collectedBlockDependencyBytes + len,
- blockDependencyBuffer.length);
- }
- }
-
-}
-
diff --git a/src/org/apache/commons/compress/compressors/lz4/XXHash32.java b/src/org/apache/commons/compress/compressors/lz4/XXHash32.java
deleted file mode 100644
index 23d29b5f470..00000000000
--- a/src/org/apache/commons/compress/compressors/lz4/XXHash32.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lz4;
-
-import static java.lang.Integer.rotateLeft;
-
-import java.util.zip.Checksum;
-
-import static org.apache.commons.compress.utils.ByteUtils.fromLittleEndian;
-
-/**
- * Implementation of the xxhash32 hash algorithm.
- *
- * @see xxHash
- * @NotThreadSafe
- * @since 1.14
- */
-public class XXHash32 implements Checksum {
-
- private static final int BUF_SIZE = 16;
- private static final int ROTATE_BITS = 13;
-
- private static final int PRIME1 = (int) 2654435761L;
- private static final int PRIME2 = (int) 2246822519L;
- private static final int PRIME3 = (int) 3266489917L;
- private static final int PRIME4 = 668265263;
- private static final int PRIME5 = 374761393;
-
- private final byte[] oneByte = new byte[1];
- private final int[] state = new int[4];
- // Note: the code used to use ByteBuffer but the manual method is 50% faster
- // See: https://git-wip-us.apache.org/repos/asf/commons-compress/diff/2f56fb5c
- private final byte[] buffer = new byte[BUF_SIZE];
- private final int seed;
-
- private int totalLen;
- private int pos;
-
- /**
- * Creates an XXHash32 instance with a seed of 0.
- */
- public XXHash32() {
- this(0);
- }
-
- /**
- * Creates an XXHash32 instance.
- * @param seed the seed to use
- */
- public XXHash32(int seed) {
- this.seed = seed;
- initializeState();
- }
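-
- /*
- * Usage sketch (illustrative) through the java.util.zip.Checksum
- * interface this class implements:
- *
- *   XXHash32 hash = new XXHash32();
- *   byte[] data = "hello, world".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
- *   hash.update(data, 0, data.length);
- *   long h = hash.getValue(); // unsigned 32-bit result in the low bits
- */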
-
- @Override
- public void reset() {
- initializeState();
- totalLen = 0;
- pos = 0;
- }
-
- @Override
- public void update(int b) {
- oneByte[0] = (byte) (b & 0xff);
- update(oneByte, 0, 1);
- }
-
- @Override
- public void update(byte[] b, int off, final int len) {
- if (len <= 0) {
- return;
- }
- totalLen += len;
-
- final int end = off + len;
-
- if (pos + len < BUF_SIZE) {
- System.arraycopy(b, off, buffer, pos, len);
- pos += len;
- return;
- }
-
- if (pos > 0) {
- final int size = BUF_SIZE - pos;
- System.arraycopy(b, off, buffer, pos, size);
- process(buffer, 0);
- off += size;
- }
-
- final int limit = end - BUF_SIZE;
- while (off <= limit) {
- process(b, off);
- off += BUF_SIZE;
- }
-
- if (off < end) {
- pos = end - off;
- System.arraycopy(b, off, buffer, 0, pos);
- }
- }
-
- @Override
- public long getValue() {
- int hash;
- if (totalLen > BUF_SIZE) {
- hash =
- rotateLeft(state[0], 1) +
- rotateLeft(state[1], 7) +
- rotateLeft(state[2], 12) +
- rotateLeft(state[3], 18);
- } else {
- hash = state[2] + PRIME5;
- }
- hash += totalLen;
-
- int idx = 0;
- final int limit = pos - 4;
- for (; idx <= limit; idx += 4) {
- hash = rotateLeft(hash + getInt(buffer, idx) * PRIME3, 17) * PRIME4;
- }
- while (idx < pos) {
- hash = rotateLeft(hash + (buffer[idx++] & 0xff) * PRIME5, 11) * PRIME1;
- }
-
- hash ^= hash >>> 15;
- hash *= PRIME2;
- hash ^= hash >>> 13;
- hash *= PRIME3;
- hash ^= hash >>> 16;
- return hash & 0xffffffffL;
- }
-
- private static int getInt(byte[] buffer, int idx) {
- return (int) (fromLittleEndian(buffer, idx, 4) & 0xffffffffL);
- }
-
- private void initializeState() {
- state[0] = seed + PRIME1 + PRIME2;
- state[1] = seed + PRIME2;
- state[2] = seed;
- state[3] = seed - PRIME1;
- }
-
- private void process(byte[] b, int offset) {
- // local shadows for performance
- int s0 = state[0];
- int s1 = state[1];
- int s2 = state[2];
- int s3 = state[3];
-
- s0 = rotateLeft(s0 + getInt(b, offset) * PRIME2, ROTATE_BITS) * PRIME1;
- s1 = rotateLeft(s1 + getInt(b, offset + 4) * PRIME2, ROTATE_BITS) * PRIME1;
- s2 = rotateLeft(s2 + getInt(b, offset + 8) * PRIME2, ROTATE_BITS) * PRIME1;
- s3 = rotateLeft(s3 + getInt(b, offset + 12) * PRIME2, ROTATE_BITS) * PRIME1;
-
- state[0] = s0;
- state[1] = s1;
- state[2] = s2;
- state[3] = s3;
-
- pos = 0;
- }
-}
diff --git a/src/org/apache/commons/compress/compressors/lz4/package.html b/src/org/apache/commons/compress/compressors/lz4/package.html
deleted file mode 100644
index 54de62bc3a0..00000000000
--- a/src/org/apache/commons/compress/compressors/lz4/package.html
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
-
-<html>
-<body>
- <p>Provides stream classes for the
- <a href="http://lz4.github.io/lz4/">LZ4</a>
- algorithm.</p>
-
- <p>The block LZ4 format which only contains the compressed data is
- supported by the <code>BlockLZ4Compressor*putStream</code>
- classes while the frame format is implemented
- by <code>FramedLZ4Compressor*putStream</code>. The
- implementation in Commons Compress is based on the
- specifications "Last revised: 2015-03-26" for the block format
- and version "1.5.1 (31/03/2015)" for the frame format.</p>
-
- <p>Only the frame format can be auto-detected. This means you have
- to specify the format explicitly if you want to read a block LZ4
- stream via <code>CompressorStreamFactory</code>.</p>
-</body>
-</html>
-
diff --git a/src/org/apache/commons/compress/compressors/lz77support/AbstractLZ77CompressorInputStream.java b/src/org/apache/commons/compress/compressors/lz77support/AbstractLZ77CompressorInputStream.java
deleted file mode 100644
index 8a1371af93d..00000000000
--- a/src/org/apache/commons/compress/compressors/lz77support/AbstractLZ77CompressorInputStream.java
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lz77support;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.ByteUtils;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * Encapsulates code common to LZ77 decompressors.
- *
- * <p>Assumes the stream consists of blocks of literal data and
- * back-references (called copies) in any order. Of course the first
- * block must be a literal block for the scheme to work - unless the
- * {@link #prefill prefill} method has been used to provide initial
- * data that is never returned by {@link #read read} but only used for
- * back-references.</p>
- *
- * <p>Subclasses must override the three-arg {@link #read read} method
- * as the no-arg version delegates to it and the default
- * implementation delegates to the no-arg version, leading to infinite
- * mutual recursion and a {@code StackOverflowError} otherwise.</p>
- *
- * <p>The contract for subclasses' {@code read} implementation is:</p>
- * <ul>
- *
- *  <li>keep track of the current state of the stream. Is it inside a
- *  literal block or a back-reference or in-between blocks?</li>
- *
- *  <li>Use {@link #readOneByte} to access the underlying stream
- *  directly.</li>
- *
- *  <li>If a new literal block starts, use {@link #startLiteral} to
- *  tell this class about it and read the literal data using {@link
- *  #readLiteral} until it returns {@code 0}. {@link
- *  #hasMoreDataInBlock} will return {@code false} before the next
- *  call to {@link #readLiteral} would return {@code 0}.</li>
- *
- *  <li>If a new back-reference starts, use {@link #startBackReference} to
- *  tell this class about it and read the data using {@link
- *  #readBackReference} until it returns {@code 0}. {@link
- *  #hasMoreDataInBlock} will return {@code false} before the next
- *  call to {@link #readBackReference} would return {@code 0}.</li>
- *
- *  <li>If the end of the stream has been reached, return {@code -1}
- *  as this class' methods will never do so themselves.</li>
- * </ul>
- *
- * <p>{@link #readOneByte} and {@link #readLiteral} update the counter
- * for bytes read.</p>
- *
- * @since 1.14
- */
-public abstract class AbstractLZ77CompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
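-
- /*
- * Minimal subclass sketch (illustrative only - the "toy" format is
- * invented here) following the contract described above: each block is
- * one length byte followed by that many literal bytes, and a zero
- * length byte marks the end of data.
- *
- *   final class ToyInputStream extends AbstractLZ77CompressorInputStream {
- *       ToyInputStream(InputStream is) throws IOException {
- *           super(is, 64 * 1024);
- *       }
- *       @Override
- *       public int read(byte[] b, int off, int len) throws IOException {
- *           if (!hasMoreDataInBlock()) {
- *               int size = readOneByte();
- *               if (size <= 0) {
- *                   return -1; // EOF detection is the subclass' job
- *               }
- *               startLiteral(size);
- *           }
- *           return readLiteral(b, off, len);
- *       }
- *   }
- */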
-
- /** Size of the window - must be bigger than the biggest offset expected. */
- private final int windowSize;
-
- /**
- * Buffer to write decompressed bytes to for back-references, will
- * be three times windowSize big.
- *
- * <p>Three times so we can slide the whole buffer a windowSize to
- * the left once we've read twice windowSize and still have enough
- * data inside of it to satisfy back-references.
- */
- private final byte[] buf;
-
- /** One behind the index of the last byte in the buffer that was written, i.e. the next position to write to */
- private int writeIndex;
-
- /** Index of the next byte to be read. */
- private int readIndex;
-
- /** The underlying stream to read compressed data from */
- private final CountingInputStream in;
-
- /** Number of bytes still to be read from the current literal or back-reference. */
- private long bytesRemaining;
-
- /** Offset of the current back-reference. */
- private int backReferenceOffset;
-
- /** uncompressed size */
- private int size = 0;
-
- // used in no-arg read method
- private final byte[] oneByte = new byte[1];
-
- /**
- * Supplier that delegates to {@link #readOneByte}.
- */
- protected final ByteUtils.ByteSupplier supplier = new ByteUtils.ByteSupplier() {
- @Override
- public int getAsByte() throws IOException {
- return readOneByte();
- }
- };
-
- /**
- * Creates a new LZ77 input stream.
- *
- * @param is
- * An InputStream to read compressed data from
- * @param windowSize
- * Size of the window kept for back-references, must be bigger than the biggest offset expected.
- *
- * @throws IOException if reading fails
- */
- public AbstractLZ77CompressorInputStream(final InputStream is, int windowSize) throws IOException {
- this.in = new CountingInputStream(is);
- this.windowSize = windowSize;
- buf = new byte[3 * windowSize];
- writeIndex = readIndex = 0;
- bytesRemaining = 0;
- }
-
- /** {@inheritDoc} */
- @Override
- public int read() throws IOException {
- return read(oneByte, 0, 1) == -1 ? -1 : oneByte[0] & 0xFF;
- }
-
- /** {@inheritDoc} */
- @Override
- public void close() throws IOException {
- in.close();
- }
-
- /** {@inheritDoc} */
- @Override
- public int available() {
- return writeIndex - readIndex;
- }
-
- /**
- * Get the uncompressed size of the stream
- *
- * @return the uncompressed size
- */
- public int getSize() {
- return size;
- }
-
- /**
- * Adds some initial data to fill the window with.
- *
- * <p>This is used if the stream has been cut into blocks and
- * back-references of one block may refer to data of the previous
- * block(s). One such example is the LZ4 frame format using block
- * dependency.
- *
- * @param data the data to fill the window with.
- * @throws IllegalStateException if the stream has already started to read data
- */
- public void prefill(byte[] data) {
- if (writeIndex != 0) {
- throw new IllegalStateException("the stream has already been read from, can't prefill anymore");
- }
- // we don't need more data than the big offset could refer to, so cap it
- int len = Math.min(windowSize, data.length);
- // we need the last data as we are dealing with *back*-references
- System.arraycopy(data, data.length - len, buf, 0, len);
- writeIndex += len;
- readIndex += len;
- }
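- // This is how FramedLZ4CompressorInputStream (elsewhere in this patch)
- // chains dependent blocks: the tail of the previously decompressed
- // output is handed to the next block's decompressor before any reading
- // starts, via s.prefill(blockDependencyBuffer).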
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return in.getBytesRead();
- }
-
- /**
- * Used by subclasses to signal the next block contains the given
- * amount of literal data.
- * @param length the length of the block
- */
- protected final void startLiteral(long length) {
- bytesRemaining = length;
- }
-
- /**
- * Is there still data remaining inside the current block?
- * @return true if there is still data remaining inside the current block.
- */
- protected final boolean hasMoreDataInBlock() {
- return bytesRemaining > 0;
- }
-
- /**
- * Reads data from the current literal block.
- * @param b buffer to write data to
- * @param off offset to start writing to
- * @param len maximum amount of data to read
- * @return number of bytes read, may be 0. Will never return -1 as
- * EOF-detection is the responsibility of the subclass
- * @throws IOException if the underlying stream throws or signals
- * an EOF before the amount of data promised for the block have
- * been read
- */
- protected final int readLiteral(final byte[] b, final int off, final int len) throws IOException {
- final int avail = available();
- if (len > avail) {
- tryToReadLiteral(len - avail);
- }
- return readFromBuffer(b, off, len);
- }
-
- private void tryToReadLiteral(int bytesToRead) throws IOException {
- // min of "what is still inside the literal", "what does the user want" and "how much can fit into the buffer"
- final int reallyTryToRead = Math.min((int) Math.min(bytesToRead, bytesRemaining),
- buf.length - writeIndex);
- final int bytesRead = reallyTryToRead > 0
- ? IOUtils.readFully(in, buf, writeIndex, reallyTryToRead)
- : 0 /* happens for bytesRemaining == 0 */;
- count(bytesRead);
- if (reallyTryToRead != bytesRead) {
- throw new IOException("Premature end of stream reading literal");
- }
- writeIndex += reallyTryToRead;
- bytesRemaining -= reallyTryToRead;
- }
-
- private int readFromBuffer(final byte[] b, final int off, final int len) {
- final int readable = Math.min(len, available());
- if (readable > 0) {
- System.arraycopy(buf, readIndex, b, off, readable);
- readIndex += readable;
- if (readIndex > 2 * windowSize) {
- slideBuffer();
- }
- }
- size += readable;
- return readable;
- }
-
- private void slideBuffer() {
- System.arraycopy(buf, windowSize, buf, 0, windowSize * 2);
- writeIndex -= windowSize;
- readIndex -= windowSize;
- }
-
- /**
- * Used by subclasses to signal the next block contains a back-reference with the given coordinates.
- * @param offset the offset of the back-reference
- * @param length the length of the back-reference
- */
- protected final void startBackReference(int offset, long length) {
- backReferenceOffset = offset;
- bytesRemaining = length;
- }
-
- /**
- * Reads data from the current back-reference.
- * @param b buffer to write data to
- * @param off offset to start writing to
- * @param len maximum amount of data to read
- * @return number of bytes read, may be 0. Will never return -1 as
- * EOF-detection is the responsibility of the subclass
- */
- protected final int readBackReference(final byte[] b, final int off, final int len) {
- final int avail = available();
- if (len > avail) {
- tryToCopy(len - avail);
- }
- return readFromBuffer(b, off, len);
- }
-
- private void tryToCopy(int bytesToCopy) {
- // this will fit into the buffer without sliding and not
- // require more than is available inside the back-reference
- int copy = Math.min((int) Math.min(bytesToCopy, bytesRemaining),
- buf.length - writeIndex);
- if (copy == 0) {
- // NOP
- } else if (backReferenceOffset == 1) { // pretty common special case
- final byte last = buf[writeIndex - 1];
- Arrays.fill(buf, writeIndex, writeIndex + copy, last);
- writeIndex += copy;
- } else if (copy < backReferenceOffset) {
- System.arraycopy(buf, writeIndex - backReferenceOffset, buf, writeIndex, copy);
- writeIndex += copy;
- } else {
- // back-reference overlaps with the bytes created from it
- // like go back two bytes and then copy six (by copying
- // the last two bytes three times).
- final int fullRots = copy / backReferenceOffset;
- for (int i = 0; i < fullRots; i++) {
- System.arraycopy(buf, writeIndex - backReferenceOffset, buf, writeIndex, backReferenceOffset);
- writeIndex += backReferenceOffset;
- }
-
- final int pad = copy - (backReferenceOffset * fullRots);
- if (pad > 0) {
- System.arraycopy(buf, writeIndex - backReferenceOffset, buf, writeIndex, pad);
- writeIndex += pad;
- }
- }
- bytesRemaining -= copy;
- }
-
- /**
- * Reads a single byte from the real input stream and ensures the data is accounted for.
- *
- * @return the byte read as value between 0 and 255 or -1 if EOF has been reached.
- * @throws IOException if the underlying stream throws
- */
- protected final int readOneByte() throws IOException {
- final int b = in.read();
- if (b != -1) {
- count(1);
- return b & 0xFF;
- }
- return -1;
- }
-}
diff --git a/src/org/apache/commons/compress/compressors/lz77support/LZ77Compressor.java b/src/org/apache/commons/compress/compressors/lz77support/LZ77Compressor.java
deleted file mode 100644
index 27fec8d3c3b..00000000000
--- a/src/org/apache/commons/compress/compressors/lz77support/LZ77Compressor.java
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lz77support;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-/**
- * Helper class for compression algorithms that use the ideas of LZ77.
- *
- * <p>Most LZ77 derived algorithms split input data into blocks of
- * uncompressed data (called literal blocks) and back-references
- * (pairs of offsets and lengths) that state "add <em>length</em>
- * bytes that are the same as those already written starting
- * <em>offset</em> bytes before the current position". The details
- * of how those blocks and back-references are encoded are quite
- * different between the algorithms and some algorithms perform
- * additional steps (Huffman encoding in the case of DEFLATE for
- * example).</p>
- *
- * <p>This class attempts to extract the core logic - finding
- * back-references - so it can be re-used. It follows the algorithm
- * explained in section 4 of RFC 1951 (DEFLATE) and currently doesn't
- * implement the "lazy match" optimization. The three-byte hash
- * function used in this class is the same as the one used by zlib and
- * InfoZIP's ZIP implementation of DEFLATE. The whole class is
- * strongly inspired by InfoZIP's implementation.</p>
- *
- * <p>LZ77 is used vaguely here (as well as many other places that
- * talk about it :-), LZSS would likely be closer to the truth but
- * LZ77 has become the synonym for a whole family of algorithms.</p>
- *
- * <p>The API consists of a compressor that is fed bytes
- * and emits {@link Block}s to a registered callback where the blocks
- * represent either {@link LiteralBlock literal blocks}, {@link
- * BackReference back-references} or {@link EOD end of data
- * markers}. In order to ensure the callback receives all information,
- * the {@code #finish} method must be used once all data has been fed
- * into the compressor.</p>
- *
- * <p>Several parameters influence the outcome of the "compression":</p>
- * <dl>
- *
- *  <dt><code>windowSize</code></dt> <dd>the size of the sliding
- *  window, must be a power of two - this determines the maximum
- *  offset a back-reference can take. The compressor maintains a
- *  buffer of twice of <code>windowSize</code> - real world values are
- *  in the area of 32k.</dd>
- *
- *  <dt><code>minBackReferenceLength</code></dt>
- *  <dd>Minimal length of a back-reference found. A true minimum of 3 is
- *  hard-coded inside of this implementation but bigger lengths can be
- *  configured.</dd>
- *
- *  <dt><code>maxBackReferenceLength</code></dt>
- *  <dd>Maximal length of a back-reference found.</dd>
- *
- *  <dt><code>maxOffset</code></dt>
- *  <dd>Maximal offset of a back-reference.</dd>
- *
- *  <dt><code>maxLiteralLength</code></dt>
- *  <dd>Maximal length of a literal block.</dd>
- * </dl>
- *
- * @see "https://tools.ietf.org/html/rfc1951#section-4"
- * @since 1.14
- * @NotThreadSafe
- */
-public class LZ77Compressor {
-
- /**
- * Base class representing blocks the compressor may emit.
- *
- * <p>This class is not supposed to be subclassed by classes
- * outside of Commons Compress so it is considered internal and
- * changes that would break subclasses may get introduced with
- * future releases.</p>
- */
- public abstract static class Block {
- /** Enumeration of the block types the compressor may emit. */
- public enum BlockType {
- LITERAL, BACK_REFERENCE, EOD
- }
- public abstract BlockType getType();
- }
-
- /**
- * Represents a literal block of data.
- *
- * <p>For performance reasons this encapsulates the real data, not
- * a copy of it. Don't modify the data and process it inside of
- * {@link Callback#accept} immediately as it will get overwritten
- * sooner or later.
- */
- public static final class LiteralBlock extends Block {
- private final byte[] data;
- private final int offset, length;
- public LiteralBlock(byte[] data, int offset, int length) {
- this.data = data;
- this.offset = offset;
- this.length = length;
- }
- /**
- * The literal data.
- *
- * <p>This returns a live view of the actual data in order to
- * avoid copying; modify the array at your own risk.
- * @return the data
- */
- public byte[] getData() {
- return data;
- }
- /**
- * Offset into data where the literal block starts.
- * @return the offset
- */
- public int getOffset() {
- return offset;
- }
- /**
- * Length of literal block.
- * @return the length
- */
- public int getLength() {
- return length;
- }
- @Override
- public BlockType getType() {
- return BlockType.LITERAL;
- }
- @Override
- public String toString() {
- return "LiteralBlock starting at " + offset + " with length " + length;
- }
- }
-
- /**
- * Represents a back-reference.
- */
- public static final class BackReference extends Block {
- private final int offset, length;
- public BackReference(int offset, int length) {
- this.offset = offset;
- this.length = length;
- }
- /**
- * Provides the offset of the back-reference.
- * @return the offset
- */
- public int getOffset() {
- return offset;
- }
- /**
- * Provides the length of the back-reference.
- * @return the length
- */
- public int getLength() {
- return length;
- }
- @Override
- public BlockType getType() {
- return BlockType.BACK_REFERENCE;
- }
- @Override
- public String toString() {
- return "BackReference with offset " + offset + " and length " + length;
- }
- }
-
- /** A simple "we are done" marker. */
- public static final class EOD extends Block {
- @Override
- public BlockType getType() {
- return BlockType.EOD;
- }
- }
-
- private static final Block THE_EOD = new EOD();
-
- /**
- * Callback invoked while the compressor processes data.
- *
- * <p>The callback is invoked on the same thread that receives the
- * bytes to compress and may be invoked multiple times during the
- * execution of {@link #compress} or {@link #finish}.
- */
- public interface Callback {
- /**
- * Consumes a block.
- * @param b the block to consume
- * @throws IOException in case of an error
- */
- void accept(Block b) throws IOException;
- }
-
- static final int NUMBER_OF_BYTES_IN_HASH = 3;
- private static final int NO_MATCH = -1;
-
- private final Parameters params;
- private final Callback callback;
-
- // the sliding window, twice as big as "windowSize" parameter
- private final byte[] window;
- // the head of hash-chain - indexed by hash-code, points to the
- // location inside of window of the latest sequence of bytes with
- // the given hash.
- private final int[] head;
- // for each window-location points to the latest earlier location
- // with the same hash. Only stores values for the latest
- // "windowSize" elements, the index is "window location modulo
- // windowSize".
- private final int[] prev;
-
- // bit mask used when indexing into prev
- private final int wMask;
-
- private boolean initialized = false;
- // the position inside of window that shall be encoded right now
- private int currentPosition;
- // the number of bytes available to compress including the one at
- // currentPosition
- private int lookahead = 0;
- // the hash of the three bytes starting at the current position
- private int insertHash = 0;
- // the position inside of the window where the current literal
- // block starts (in case we are inside of a literal block).
- private int blockStart = 0;
- // position of the current match
- private int matchStart = NO_MATCH;
- // number of missed insertString calls for the up to three last
- // bytes of the last match that can only be performed once more
- // data has been read
- private int missedInserts = 0;
-
- /**
- * Initializes a compressor with parameters and a callback.
- * @param params the parameters
- * @param callback the callback
- * @throws NullPointerException if either parameter is null
- */
- public LZ77Compressor(Parameters params, Callback callback) {
- if (params == null) {
- throw new NullPointerException("params must not be null");
- }
- if (callback == null) {
- throw new NullPointerException("callback must not be null");
- }
- this.params = params;
- this.callback = callback;
-
- final int wSize = params.getWindowSize();
- window = new byte[wSize * 2];
- wMask = wSize - 1;
- head = new int[HASH_SIZE];
- Arrays.fill(head, NO_MATCH);
- prev = new int[wSize];
- }
-
- /**
- * Feeds bytes into the compressor which in turn may emit zero or
- * more blocks to the callback during the execution of this
- * method.
- * @param data the data to compress - must not be null
- * @throws IOException if the callback throws an exception
- */
- public void compress(byte[] data) throws IOException {
- compress(data, 0, data.length);
- }
-
- /**
- * Feeds bytes into the compressor which in turn may emit zero or
- * more blocks to the callback during the execution of this
- * method.
- * @param data the data to compress - must not be null
- * @param off the start offset of the data
- * @param len the number of bytes to compress
- * @throws IOException if the callback throws an exception
- */
- public void compress(byte[] data, int off, int len) throws IOException {
- final int wSize = params.getWindowSize();
- while (len > wSize) { // chop into windowSize sized chunks
- doCompress(data, off, wSize);
- off += wSize;
- len -= wSize;
- }
- if (len > 0) {
- doCompress(data, off, len);
- }
- }
-
- /**
- * Tells the compressor to process all remaining data and signal
- * end of data to the callback.
- *
- * <p>The compressor will in turn emit at least one block ({@link
- * EOD}) but potentially multiple blocks to the callback during
- * the execution of this method.
- * @throws IOException if the callback throws an exception
- */
- public void finish() throws IOException {
- if (blockStart != currentPosition || lookahead > 0) {
- currentPosition += lookahead;
- flushLiteralBlock();
- }
- callback.accept(THE_EOD);
- }
-
- /**
- * Adds some initial data to fill the window with.
- *
- * <p>This is used if the stream has been cut into blocks and
- * back-references of one block may refer to data of the previous
- * block(s). One such example is the LZ4 frame format using block
- * dependency.
- *
- * @param data the data to fill the window with.
- * @throws IllegalStateException if the compressor has already started to accept data
- */
- public void prefill(byte[] data) {
- if (currentPosition != 0 || lookahead != 0) {
- throw new IllegalStateException("the compressor has already started to accept data, can't prefill anymore");
- }
-
- // don't need more than windowSize for back-references
- final int len = Math.min(params.getWindowSize(), data.length);
- System.arraycopy(data, data.length - len, window, 0, len);
-
- if (len >= NUMBER_OF_BYTES_IN_HASH) {
- initialize();
- final int stop = len - NUMBER_OF_BYTES_IN_HASH + 1;
- for (int i = 0; i < stop; i++) {
- insertString(i);
- }
- missedInserts = NUMBER_OF_BYTES_IN_HASH - 1;
- } else { // not enough data to hash anything
- missedInserts = len;
- }
- blockStart = currentPosition = len;
- }
-
- // we use a 15 bit hashcode as calculated in updateHash
- private static final int HASH_SIZE = 1 << 15;
- private static final int HASH_MASK = HASH_SIZE - 1;
- private static final int H_SHIFT = 5;
-
- /**
- * Assumes we are calculating the hash for three consecutive bytes
- * as a rolling hash, i.e. for bytes ABCD if H is the hash of ABC
- * the new hash for BCD is nextHash(H, D).
- *
- * <p>The hash is shifted by five bits on each update so all
- * effects of A have been swapped after the third update.
- */
- private int nextHash(int oldHash, byte nextByte) {
- final int nextVal = nextByte & 0xFF;
- return ((oldHash << H_SHIFT) ^ nextVal) & HASH_MASK;
- }
-
- // performs the actual algorithm with the pre-condition len <= windowSize
- private void doCompress(byte[] data, int off, int len) throws IOException {
- int spaceLeft = window.length - currentPosition - lookahead;
- if (len > spaceLeft) {
- slide();
- }
- System.arraycopy(data, off, window, currentPosition + lookahead, len);
- lookahead += len;
- if (!initialized && lookahead >= params.getMinBackReferenceLength()) {
- initialize();
- }
- if (initialized) {
- compress();
- }
- }
-
- private void slide() throws IOException {
- final int wSize = params.getWindowSize();
- if (blockStart != currentPosition && blockStart < wSize) {
- flushLiteralBlock();
- blockStart = currentPosition;
- }
- System.arraycopy(window, wSize, window, 0, wSize);
- currentPosition -= wSize;
- matchStart -= wSize;
- blockStart -= wSize;
- for (int i = 0; i < HASH_SIZE; i++) {
- int h = head[i];
- head[i] = h >= wSize ? h - wSize : NO_MATCH;
- }
- for (int i = 0; i < wSize; i++) {
- int p = prev[i];
- prev[i] = p >= wSize ? p - wSize : NO_MATCH;
- }
- }
-
- private void initialize() {
- for (int i = 0; i < NUMBER_OF_BYTES_IN_HASH - 1; i++) {
- insertHash = nextHash(insertHash, window[i]);
- }
- initialized = true;
- }
-
- private void compress() throws IOException {
- final int minMatch = params.getMinBackReferenceLength();
- final boolean lazy = params.getLazyMatching();
- final int lazyThreshold = params.getLazyMatchingThreshold();
-
- while (lookahead >= minMatch) {
- catchUpMissedInserts();
- int matchLength = 0;
- int hashHead = insertString(currentPosition);
- if (hashHead != NO_MATCH && hashHead - currentPosition <= params.getMaxOffset()) {
- // sets matchStart as a side effect
- matchLength = longestMatch(hashHead);
-
- if (lazy && matchLength <= lazyThreshold && lookahead > minMatch) {
- // try to find a longer match using the next position
- matchLength = longestMatchForNextPosition(matchLength);
- }
- }
- if (matchLength >= minMatch) {
- if (blockStart != currentPosition) {
- // emit preceding literal block
- flushLiteralBlock();
- blockStart = NO_MATCH;
- }
- flushBackReference(matchLength);
- insertStringsInMatch(matchLength);
- lookahead -= matchLength;
- currentPosition += matchLength;
- blockStart = currentPosition;
- } else {
- // no match, append to current or start a new literal
- lookahead--;
- currentPosition++;
- if (currentPosition - blockStart >= params.getMaxLiteralLength()) {
- flushLiteralBlock();
- blockStart = currentPosition;
- }
- }
- }
- }
-
- /**
- * Inserts the current three byte sequence into the dictionary and
- * returns the previous head of the hash-chain.
- *
- * <p>Updates <code>insertHash</code> and <code>prev</code> as a
- * side effect.
- */
- private int insertString(int pos) {
- insertHash = nextHash(insertHash, window[pos - 1 + NUMBER_OF_BYTES_IN_HASH]);
- int hashHead = head[insertHash];
- prev[pos & wMask] = hashHead;
- head[insertHash] = pos;
- return hashHead;
- }
-
- private int longestMatchForNextPosition(final int prevMatchLength) {
- // save a bunch of values to restore them if the next match isn't better than the current one
- final int prevMatchStart = matchStart;
- final int prevInsertHash = insertHash;
-
- lookahead--;
- currentPosition++;
- int hashHead = insertString(currentPosition);
- final int prevHashHead = prev[currentPosition & wMask];
- int matchLength = longestMatch(hashHead);
-
- if (matchLength <= prevMatchLength) {
- // use the first match, as the next one isn't any better
- matchLength = prevMatchLength;
- matchStart = prevMatchStart;
-
- // restore modified values
- head[insertHash] = prevHashHead;
- insertHash = prevInsertHash;
- currentPosition--;
- lookahead++;
- }
- return matchLength;
- }
-
- private void insertStringsInMatch(int matchLength) {
- // inserts strings contained in current match
- // insertString inserts the byte 2 bytes after position, which may not yet be available -> missedInserts
- final int stop = Math.min(matchLength - 1, lookahead - NUMBER_OF_BYTES_IN_HASH);
- // currentPosition has been inserted already
- for (int i = 1; i <= stop; i++) {
- insertString(currentPosition + i);
- }
- missedInserts = matchLength - stop - 1;
- }
-
- private void catchUpMissedInserts() {
- while (missedInserts > 0) {
- insertString(currentPosition - missedInserts--);
- }
- }
-
- private void flushBackReference(int matchLength) throws IOException {
- callback.accept(new BackReference(currentPosition - matchStart, matchLength));
- }
-
- private void flushLiteralBlock() throws IOException {
- callback.accept(new LiteralBlock(window, blockStart, currentPosition - blockStart));
- }
-
- /**
- * Searches the hash chain for real matches and returns the length
- * of the longest match (0 if none were found) that isn't too far
- * away (WRT maxOffset).
- *
- * <p>Sets <code>matchStart</code> to the index of the start position of the
- * longest match as a side effect.
- */
- private int longestMatch(int matchHead) {
- final int minLength = params.getMinBackReferenceLength();
- int longestMatchLength = minLength - 1;
- final int maxPossibleLength = Math.min(params.getMaxBackReferenceLength(), lookahead);
- final int minIndex = Math.max(0, currentPosition - params.getMaxOffset());
- final int niceBackReferenceLength = Math.min(maxPossibleLength, params.getNiceBackReferenceLength());
- final int maxCandidates = params.getMaxCandidates();
- for (int candidates = 0; candidates < maxCandidates && matchHead >= minIndex; candidates++) {
- int currentLength = 0;
- for (int i = 0; i < maxPossibleLength; i++) {
- if (window[matchHead + i] != window[currentPosition + i]) {
- break;
- }
- currentLength++;
- }
- if (currentLength > longestMatchLength) {
- longestMatchLength = currentLength;
- matchStart = matchHead;
- if (currentLength >= niceBackReferenceLength) {
- // no need to search any further
- break;
- }
- }
- matchHead = prev[matchHead & wMask];
- }
- return longestMatchLength; // < minLength if no matches have been found, will be ignored in compress()
- }
-}
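For reference now that the bundled copy is gone: a minimal usage sketch of the deleted compressor, driving it exactly as the Javadoc above describes. The demo class name and sample input are made up; the API calls are the ones defined in this file.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.commons.compress.compressors.lz77support.LZ77Compressor;
import org.apache.commons.compress.compressors.lz77support.Parameters;

public class LZ77CompressorDemo {
    public static void main(String[] args) throws IOException {
        // 64k sliding window, all other parameters left at their defaults.
        Parameters params = Parameters.builder(64 * 1024).build();
        // Callback is a single-method interface, so a lambda works; blocks
        // arrive on the calling thread, in stream order.
        LZ77Compressor compressor = new LZ77Compressor(params, block -> System.out.println(block));
        byte[] data = "Blah blah blah blah blah!".getBytes(StandardCharsets.US_ASCII);
        compressor.compress(data);
        compressor.finish(); // flushes the pending literal block and emits EOD
    }
}

On repetitive input like the sample string the callback should receive a literal block, then a back-reference covering the repeats, then the final EOD marker.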
diff --git a/src/org/apache/commons/compress/compressors/lz77support/Parameters.java b/src/org/apache/commons/compress/compressors/lz77support/Parameters.java
deleted file mode 100644
index fe892f37f65..00000000000
--- a/src/org/apache/commons/compress/compressors/lz77support/Parameters.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lz77support;
-
-/**
- * Parameters of the {@link LZ77Compressor compressor}.
- */
-public final class Parameters {
- /**
- * The hard-coded absolute minimal length of a back-reference.
- */
- public static final int TRUE_MIN_BACK_REFERENCE_LENGTH = LZ77Compressor.NUMBER_OF_BYTES_IN_HASH;
-
- /**
- * Initializes the builder for the compressor's parameters with a
- * minBackReferenceLength of 3 and max*Length
- * equal to windowSize - 1.
- *
- * <p>It is recommended to not use this method directly but rather
- * tune a pre-configured builder created by a format specific
- * factory like {@link
- * org.apache.commons.compress.compressors.snappy.SnappyCompressorOutputStream#createParameterBuilder}.
- *
- * @param windowSize the size of the sliding window - this
- * determines the maximum offset a back-reference can take. Must
- * be a power of two.
- * @throws IllegalArgumentException if windowSize is not a power of two.
- * @return a builder configured for the given window size
- */
- public static Builder builder(int windowSize) {
- return new Builder(windowSize);
- }
-
- /**
- * Builder for {@link Parameters} instances.
- */
- public static class Builder {
- private final int windowSize;
- private int minBackReferenceLength, maxBackReferenceLength, maxOffset, maxLiteralLength;
- private Integer niceBackReferenceLength, maxCandidates, lazyThreshold;
- private Boolean lazyMatches;
-
- private Builder(int windowSize) {
- if (windowSize < 2 || !isPowerOfTwo(windowSize)) {
- throw new IllegalArgumentException("windowSize must be a power of two");
- }
- this.windowSize = windowSize;
- minBackReferenceLength = TRUE_MIN_BACK_REFERENCE_LENGTH;
- maxBackReferenceLength = windowSize - 1;
- maxOffset = windowSize - 1;
- maxLiteralLength = windowSize;
- }
-
- /**
- * Sets the minimal length of a back-reference.
- *
- * <p>Ensures <code>maxBackReferenceLength</code> is not
- * smaller than <code>minBackReferenceLength</code>.
- *
- * <p>It is recommended to not use this method directly but
- * rather tune a pre-configured builder created by a format
- * specific factory like {@link
- * org.apache.commons.compress.compressors.snappy.SnappyCompressorOutputStream#createParameterBuilder}.
- *
- * @param minBackReferenceLength the minimal length of a back-reference found. A
- * true minimum of 3 is hard-coded inside of this implementation
- * but bigger lengths can be configured.
- * @throws IllegalArgumentException if <code>windowSize</code>
- * is smaller than <code>minBackReferenceLength</code>.
- * @return the builder
- */
- public Builder withMinBackReferenceLength(int minBackReferenceLength) {
- this.minBackReferenceLength = Math.max(TRUE_MIN_BACK_REFERENCE_LENGTH, minBackReferenceLength);
- if (windowSize < this.minBackReferenceLength) {
- throw new IllegalArgumentException("minBackReferenceLength can't be bigger than windowSize");
- }
- if (maxBackReferenceLength < this.minBackReferenceLength) {
- maxBackReferenceLength = this.minBackReferenceLength;
- }
- return this;
- }
-
- /**
- * Sets the maximal length of a back-reference.
- *
- * <p>It is recommended to not use this method directly but
- * rather tune a pre-configured builder created by a format
- * specific factory like {@link
- * org.apache.commons.compress.compressors.snappy.SnappyCompressorOutputStream#createParameterBuilder}.
- *
- * @param maxBackReferenceLength maximal length of a
- * back-reference found. A value smaller than
- * minBackReferenceLength is interpreted as
- * minBackReferenceLength. maxBackReferenceLength
- * is capped at windowSize - 1.
- * @return the builder
- */
- public Builder withMaxBackReferenceLength(int maxBackReferenceLength) {
- this.maxBackReferenceLength = maxBackReferenceLength < minBackReferenceLength ? minBackReferenceLength
- : Math.min(maxBackReferenceLength, windowSize - 1);
- return this;
- }
-
- /**
- * Sets the maximal offset of a back-reference.
- *
- * <p>It is recommended to not use this method directly but
- * rather tune a pre-configured builder created by a format
- * specific factory like {@link
- * org.apache.commons.compress.compressors.snappy.SnappyCompressorOutputStream#createParameterBuilder}.
- *
- * @param maxOffset maximal offset of a back-reference. A
- * non-positive value as well as values bigger than
- * windowSize - 1 are interpreted as windowSize
- * - 1.
- * @return the builder
- */
- public Builder withMaxOffset(int maxOffset) {
- this.maxOffset = maxOffset < 1 ? windowSize - 1 : Math.min(maxOffset, windowSize - 1);
- return this;
- }
-
- /**
- * Sets the maximal length of a literal block.
- *
- * <p>It is recommended to not use this method directly but
- * rather tune a pre-configured builder created by a format
- * specific factory like {@link
- * org.apache.commons.compress.compressors.snappy.SnappyCompressorOutputStream#createParameterBuilder}.
- *
- * @param maxLiteralLength maximal length of a literal
- * block. Negative numbers and 0 as well as values bigger than
- * windowSize are interpreted as
- * windowSize.
- * @return the builder
- */
- public Builder withMaxLiteralLength(int maxLiteralLength) {
- this.maxLiteralLength = maxLiteralLength < 1 ? windowSize
- : Math.min(maxLiteralLength, windowSize);
- return this;
- }
-
- /**
- * Sets the "nice length" of a back-reference.
- *
- * <p>When a back-reference of this size has been found, stop searching for longer back-references.
- *
- * <p>This setting can be used to tune the tradeoff between compression speed and compression ratio.
- * @param niceLen the "nice length" of a back-reference
- * @return the builder
- */
- public Builder withNiceBackReferenceLength(int niceLen) {
- niceBackReferenceLength = niceLen;
- return this;
- }
-
- /**
- * Sets the maximum number of back-reference candidates that should be consulted.
- *
- * <p>This setting can be used to tune the tradeoff between compression speed and compression ratio.
- * @param maxCandidates maximum number of back-reference candidates
- * @return the builder
- */
- public Builder withMaxNumberOfCandidates(int maxCandidates) {
- this.maxCandidates = maxCandidates;
- return this;
- }
-
- /**
- * Sets whether lazy matching should be performed.
- *
- * <p>Lazy matching means that after a back-reference for a certain position has been found the compressor will
- * try to find a longer match for the next position.
- *
- * <p>Lazy matching is enabled by default and disabled when tuning for speed.
- * @param lazy whether lazy matching should be performed
- * @return the builder
- */
- public Builder withLazyMatching(boolean lazy) {
- lazyMatches = lazy;
- return this;
- }
-
- /**
- * Sets the threshold for lazy matching.
- *
- * <p>Even if lazy matching is enabled it will not be performed if the length of the back-reference found for
- * the current position is longer than this value.
- * @param threshold the threshold for lazy matching
- * @return the builder
- */
- public Builder withLazyThreshold(int threshold) {
- lazyThreshold = threshold;
- return this;
- }
-
- /**
- * Changes the default setting for "nice back-reference length" and "maximum number of candidates" for improved
- * compression speed at the cost of compression ratio.
- *
- * <p>Use this method after configuring "maximum back-reference length".
- * @return the builder
- */
- public Builder tunedForSpeed() {
- niceBackReferenceLength = Math.max(minBackReferenceLength, maxBackReferenceLength / 8);
- maxCandidates = Math.max(32, windowSize / 1024);
- lazyMatches = false;
- lazyThreshold = minBackReferenceLength;
- return this;
- }
-
- /**
- * Changes the default setting for "nice back-reference length" and "maximum number of candidates" for improved
- * compression ratio at the cost of compression speed.
- *
- * <p>Use this method after configuring "maximum back-reference length".
- * @return the builder
- */
- public Builder tunedForCompressionRatio() {
- niceBackReferenceLength = lazyThreshold = maxBackReferenceLength;
- maxCandidates = Math.max(32, windowSize / 16);
- lazyMatches = true;
- return this;
- }
-
- /**
- * Creates the {@link Parameters} instance.
- * @return the configured {@link Parameters} instance.
- */
- public Parameters build() {
- // default settings tuned for a compromise of good compression and acceptable speed
- int niceLen = niceBackReferenceLength != null ? niceBackReferenceLength
- : Math.max(minBackReferenceLength, maxBackReferenceLength / 2);
- int candidates = maxCandidates != null ? maxCandidates : Math.max(256, windowSize / 128);
- boolean lazy = lazyMatches == null || lazyMatches;
- int threshold = lazy ? (lazyThreshold != null ? lazyThreshold : niceLen) : minBackReferenceLength;
-
- return new Parameters(windowSize, minBackReferenceLength, maxBackReferenceLength,
- maxOffset, maxLiteralLength, niceLen, candidates, lazy, threshold);
- }
- }
-
- private final int windowSize, minBackReferenceLength, maxBackReferenceLength, maxOffset, maxLiteralLength,
- niceBackReferenceLength, maxCandidates, lazyThreshold;
- private final boolean lazyMatching;
-
- private Parameters(int windowSize, int minBackReferenceLength, int maxBackReferenceLength, int maxOffset,
- int maxLiteralLength, int niceBackReferenceLength, int maxCandidates, boolean lazyMatching,
- int lazyThreshold) {
- this.windowSize = windowSize;
- this.minBackReferenceLength = minBackReferenceLength;
- this.maxBackReferenceLength = maxBackReferenceLength;
- this.maxOffset = maxOffset;
- this.maxLiteralLength = maxLiteralLength;
- this.niceBackReferenceLength = niceBackReferenceLength;
- this.maxCandidates = maxCandidates;
- this.lazyMatching = lazyMatching;
- this.lazyThreshold = lazyThreshold;
- }
-
- /**
- * Gets the size of the sliding window - this determines the
- * maximum offset a back-reference can take.
- * @return the size of the sliding window
- */
- public int getWindowSize() {
- return windowSize;
- }
- /**
- * Gets the minimal length of a back-reference found.
- * @return the minimal length of a back-reference found
- */
- public int getMinBackReferenceLength() {
- return minBackReferenceLength;
- }
- /**
- * Gets the maximal length of a back-reference found.
- * @return the maximal length of a back-reference found
- */
- public int getMaxBackReferenceLength() {
- return maxBackReferenceLength;
- }
- /**
- * Gets the maximal offset of a back-reference found.
- * @return the maximal offset of a back-reference found
- */
- public int getMaxOffset() {
- return maxOffset;
- }
- /**
- * Gets the maximal length of a literal block.
- * @return the maximal length of a literal block
- */
- public int getMaxLiteralLength() {
- return maxLiteralLength;
- }
-
- /**
- * Gets the length of a back-reference that is considered nice enough to stop searching for longer ones.
- * @return the length of a back-reference that is considered nice enough to stop searching
- */
- public int getNiceBackReferenceLength() {
- return niceBackReferenceLength;
- }
-
- /**
- * Gets the maximum number of back-reference candidates to consider.
- * @return the maximum number of back-reference candidates to consider
- */
- public int getMaxCandidates() {
- return maxCandidates;
- }
-
- /**
- * Gets whether to perform lazy matching.
- * @return whether to perform lazy matching
- */
- public boolean getLazyMatching() {
- return lazyMatching;
- }
-
- /**
- * Gets the threshold for lazy matching.
- * @return the threshold for lazy matching
- */
- public int getLazyMatchingThreshold() {
- return lazyThreshold;
- }
-
- private static final boolean isPowerOfTwo(int x) {
- // pre-condition: x > 0
- return (x & (x - 1)) == 0;
- }
-}
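A sketch of how the removed builder was tuned. The demo class is made up; the expected values in the comments follow directly from the tunedForCompressionRatio() defaults above.

import org.apache.commons.compress.compressors.lz77support.Parameters;

public class ParametersDemo {
    public static void main(String[] args) {
        Parameters params = Parameters.builder(32 * 1024)  // must be a power of two
                .withMinBackReferenceLength(4)             // raises the hard-coded minimum of 3
                .withMaxBackReferenceLength(64)            // capped at windowSize - 1 anyway
                .tunedForCompressionRatio()                // call after configuring max length
                .build();
        System.out.println(params.getNiceBackReferenceLength()); // 64 (== max length)
        System.out.println(params.getMaxCandidates());           // max(32, 32768 / 16) = 2048
        System.out.println(params.getLazyMatching());            // true
    }
}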
diff --git a/src/org/apache/commons/compress/compressors/lz77support/package.html b/src/org/apache/commons/compress/compressors/lz77support/package.html
deleted file mode 100644
index 951b1460a8e..00000000000
--- a/src/org/apache/commons/compress/compressors/lz77support/package.html
+++ /dev/null
@@ -1,28 +0,0 @@
-<html>
-<body>
- <p>Provides utility classes for LZ77 based algorithms.</p>
-
- <p>The classes in this package are currently used by the LZ4 and
- Snappy implementations but might also help implementing other
- algorithms that derive from LZ77 and LZSS.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/compressors/lzma/LZMACompressorInputStream.java b/src/org/apache/commons/compress/compressors/lzma/LZMACompressorInputStream.java
deleted file mode 100644
index 794e3a54010..00000000000
--- a/src/org/apache/commons/compress/compressors/lzma/LZMACompressorInputStream.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lzma;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.compress.MemoryLimitException;
-import org.tukaani.xz.LZMAInputStream;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * LZMA decompressor.
- * @since 1.6
- */
-public class LZMACompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
-
- private final CountingInputStream countingStream;
- private final InputStream in;
-
- /**
- * Creates a new input stream that decompresses LZMA-compressed data
- * from the specified input stream.
- *
- * @param inputStream where to read the compressed data
- *
- * @throws IOException if the input is not in the .lzma format,
- * the input is corrupt or truncated, the .lzma
- * headers specify sizes that are not supported
- * by this implementation, or the underlying
- * inputStream throws an exception
- */
- public LZMACompressorInputStream(final InputStream inputStream)
- throws IOException {
- in = new LZMAInputStream(countingStream = new CountingInputStream(inputStream), -1);
- }
-
- /**
- * Creates a new input stream that decompresses LZMA-compressed data
- * from the specified input stream.
- *
- * @param inputStream where to read the compressed data
- *
- * @param memoryLimitInKb memory use threshold in kilobytes. A MemoryLimitException
- * is thrown if the calculated memory use is above this threshold
- *
- * @throws IOException if the input is not in the .lzma format,
- * the input is corrupt or truncated, the .lzma
- * headers specify sizes that are not supported
- * by this implementation, or the underlying
- * inputStream throws an exception
- *
- * @since 1.14
- */
- public LZMACompressorInputStream(final InputStream inputStream, int memoryLimitInKb)
- throws IOException {
- try {
- in = new LZMAInputStream(countingStream = new CountingInputStream(inputStream), memoryLimitInKb);
- } catch (org.tukaani.xz.MemoryLimitException e) {
- //convert to commons-compress exception
- throw new MemoryLimitException(e.getMemoryNeeded(), e.getMemoryLimit(), e);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public int read() throws IOException {
- final int ret = in.read();
- count(ret == -1 ? 0 : 1);
- return ret;
- }
-
- /** {@inheritDoc} */
- @Override
- public int read(final byte[] buf, final int off, final int len) throws IOException {
- final int ret = in.read(buf, off, len);
- count(ret);
- return ret;
- }
-
- /** {@inheritDoc} */
- @Override
- public long skip(final long n) throws IOException {
- return IOUtils.skip(in, n);
- }
-
- /** {@inheritDoc} */
- @Override
- public int available() throws IOException {
- return in.available();
- }
-
- /** {@inheritDoc} */
- @Override
- public void close() throws IOException {
- in.close();
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return countingStream.getBytesRead();
- }
-
- /**
- * Checks if the signature matches what is expected for an lzma file.
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is an lzma compressed stream, false otherwise
- *
- * @since 1.10
- */
- public static boolean matches(final byte[] signature, final int length) {
- return signature != null && length >= 3 &&
- signature[0] == 0x5d && signature[1] == 0 &&
- signature[2] == 0;
- }
-}
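A usage sketch for the removed stream (the file name is hypothetical). The second constructor argument is the memory limit in kilobytes; exceeding it surfaces as the commons-compress MemoryLimitException rather than the XZ-for-Java one, as the catch block above shows.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.commons.compress.compressors.lzma.LZMACompressorInputStream;

public class LzmaReadDemo {
    public static void main(String[] args) throws IOException {
        // Cap decoder memory at 64 MiB (64 * 1024 KiB).
        try (InputStream fin = Files.newInputStream(Paths.get("archive.lzma"));
             LZMACompressorInputStream in = new LZMACompressorInputStream(fin, 64 * 1024)) {
            byte[] buf = new byte[8192];
            int n;
            while ((n = in.read(buf, 0, buf.length)) != -1) {
                System.out.write(buf, 0, n);
            }
        }
    }
}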
diff --git a/src/org/apache/commons/compress/compressors/lzma/LZMACompressorOutputStream.java b/src/org/apache/commons/compress/compressors/lzma/LZMACompressorOutputStream.java
deleted file mode 100644
index e6bdfa4557a..00000000000
--- a/src/org/apache/commons/compress/compressors/lzma/LZMACompressorOutputStream.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lzma;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import org.tukaani.xz.LZMA2Options;
-import org.tukaani.xz.LZMAOutputStream;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-
-/**
- * LZMA compressor.
- * @since 1.13
- */
-public class LZMACompressorOutputStream extends CompressorOutputStream {
- private final LZMAOutputStream out;
-
- /**
- * Creates a LZMA compressor.
- *
- * @param outputStream the stream to wrap
- * @throws IOException on error
- */
- public LZMACompressorOutputStream(final OutputStream outputStream)
- throws IOException {
- out = new LZMAOutputStream(outputStream, new LZMA2Options(), -1);
- }
-
- /** {@inheritDoc} */
- @Override
- public void write(final int b) throws IOException {
- out.write(b);
- }
-
- /** {@inheritDoc} */
- @Override
- public void write(final byte[] buf, final int off, final int len) throws IOException {
- out.write(buf, off, len);
- }
-
- /**
- * Doesn't do anything as {@link LZMAOutputStream} doesn't support flushing.
- */
- @Override
- public void flush() throws IOException {
- }
-
- /**
- * Finishes compression without closing the underlying stream.
- * No more data can be written to this stream after finishing.
- * @throws IOException on error
- */
- public void finish() throws IOException {
- out.finish();
- }
-
- /** {@inheritDoc} */
- @Override
- public void close() throws IOException {
- out.close();
- }
-}
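The matching write side (file name hypothetical). close() finishes the stream implicitly; call finish() instead when the underlying stream must stay open.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.commons.compress.compressors.lzma.LZMACompressorOutputStream;

public class LzmaWriteDemo {
    public static void main(String[] args) throws IOException {
        try (OutputStream fout = Files.newOutputStream(Paths.get("archive.lzma"));
             LZMACompressorOutputStream out = new LZMACompressorOutputStream(fout)) {
            out.write("Hello, LZMA".getBytes(StandardCharsets.US_ASCII));
        } // try-with-resources closes (and thereby finishes) the stream
    }
}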
diff --git a/src/org/apache/commons/compress/compressors/lzma/LZMAUtils.java b/src/org/apache/commons/compress/compressors/lzma/LZMAUtils.java
deleted file mode 100644
index 8722e6d8906..00000000000
--- a/src/org/apache/commons/compress/compressors/lzma/LZMAUtils.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lzma;
-
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.commons.compress.compressors.FileNameUtil;
-
-/**
- * Utility code for the lzma compression format.
- * @ThreadSafe
- * @since 1.10
- */
-public class LZMAUtils {
-
- private static final FileNameUtil fileNameUtil;
-
- /**
- * LZMA Header Magic Bytes begin a LZMA file.
- */
- private static final byte[] HEADER_MAGIC = {
- (byte) 0x5D, 0, 0
- };
-
- enum CachedAvailability {
- DONT_CACHE, CACHED_AVAILABLE, CACHED_UNAVAILABLE
- }
-
- private static volatile CachedAvailability cachedLZMAAvailability;
-
- static {
- final Map<String, String> uncompressSuffix = new HashMap<>();
- uncompressSuffix.put(".lzma", "");
- uncompressSuffix.put("-lzma", "");
- fileNameUtil = new FileNameUtil(uncompressSuffix, ".lzma");
- cachedLZMAAvailability = CachedAvailability.DONT_CACHE;
- try {
- Class.forName("org.osgi.framework.BundleEvent");
- } catch (final Exception ex) {
- setCacheLZMAAvailablity(true);
- }
- }
-
- /** Private constructor to prevent instantiation of this utility class. */
- private LZMAUtils() {
- }
-
- /**
- * Checks if the signature matches what is expected for a .lzma file.
- *
- * @param signature the bytes to check
- * @param length the number of bytes to check
- * @return true if signature matches the .lzma magic bytes, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
- if (length < HEADER_MAGIC.length) {
- return false;
- }
-
- for (int i = 0; i < HEADER_MAGIC.length; ++i) {
- if (signature[i] != HEADER_MAGIC[i]) {
- return false;
- }
- }
-
- return true;
- }
-
- /**
- * Are the classes required to support LZMA compression available?
- * @return true if the classes required to support LZMA
- * compression are available
- */
- public static boolean isLZMACompressionAvailable() {
- final CachedAvailability cachedResult = cachedLZMAAvailability;
- if (cachedResult != CachedAvailability.DONT_CACHE) {
- return cachedResult == CachedAvailability.CACHED_AVAILABLE;
- }
- return internalIsLZMACompressionAvailable();
- }
-
- private static boolean internalIsLZMACompressionAvailable() {
- try {
- LZMACompressorInputStream.matches(null, 0);
- return true;
- } catch (final NoClassDefFoundError error) {
- return false;
- }
- }
-
- /**
- * Detects common lzma suffixes in the given filename.
- *
- * @param filename name of a file
- * @return {@code true} if the filename has a common lzma suffix,
- * {@code false} otherwise
- */
- public static boolean isCompressedFilename(final String filename) {
- return fileNameUtil.isCompressedFilename(filename);
- }
-
- /**
- * Maps the given name of a lzma-compressed file to the name that
- * the file should have after uncompression. Any filename with
- * the generic ".lzma" suffix (or any other generic lzma suffix)
- * is mapped to a name without that suffix. If no lzma suffix is
- * detected, then the filename is returned unmapped.
- *
- * @param filename name of a file
- * @return name of the corresponding uncompressed file
- */
- public static String getUncompressedFilename(final String filename) {
- return fileNameUtil.getUncompressedFilename(filename);
- }
-
- /**
- * Maps the given filename to the name that the file should have after
- * compression with lzma.
- *
- * @param filename name of a file
- * @return name of the corresponding compressed file
- */
- public static String getCompressedFilename(final String filename) {
- return fileNameUtil.getCompressedFilename(filename);
- }
-
- /**
- * Whether to cache the result of the LZMA check.
- *
- * <p>This defaults to {@code false} in an OSGi environment and {@code true} otherwise.
- * @param doCache whether to cache the result
- */
- public static void setCacheLZMAAvailablity(final boolean doCache) {
- if (!doCache) {
- cachedLZMAAvailability = CachedAvailability.DONT_CACHE;
- } else if (cachedLZMAAvailability == CachedAvailability.DONT_CACHE) {
- final boolean hasLzma = internalIsLZMACompressionAvailable();
- cachedLZMAAvailability = hasLzma ? CachedAvailability.CACHED_AVAILABLE // NOSONAR
- : CachedAvailability.CACHED_UNAVAILABLE;
- }
- }
-
- // only exists to support unit tests
- static CachedAvailability getCachedLZMAAvailability() {
- return cachedLZMAAvailability;
- }
-}
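The suffix mapping in this utility is symmetric; a short sketch (demo class made up):

import org.apache.commons.compress.compressors.lzma.LZMAUtils;

public class LzmaUtilsDemo {
    public static void main(String[] args) {
        System.out.println(LZMAUtils.isCompressedFilename("notes.txt.lzma"));    // true
        System.out.println(LZMAUtils.getUncompressedFilename("notes.txt.lzma")); // notes.txt
        System.out.println(LZMAUtils.getCompressedFilename("notes.txt"));        // notes.txt.lzma
        // Returns false instead of throwing when XZ for Java is absent.
        System.out.println(LZMAUtils.isLZMACompressionAvailable());
    }
}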
diff --git a/src/org/apache/commons/compress/compressors/lzma/package.html b/src/org/apache/commons/compress/compressors/lzma/package.html
deleted file mode 100644
index f3b54730f6a..00000000000
--- a/src/org/apache/commons/compress/compressors/lzma/package.html
+++ /dev/null
@@ -1,32 +0,0 @@
-<html>
-<body>
- <p>Provides stream classes using the "stand-alone" LZMA algorithm.</p>
-
- <p>The classes in this package are wrappers around stream classes
- provided by the public domain XZ for Java library.</p>
-
- <p>In general you should prefer the more modern and robust XZ
- format over stand-alone LZMA compression.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/compressors/lzw/LZWInputStream.java b/src/org/apache/commons/compress/compressors/lzw/LZWInputStream.java
deleted file mode 100644
index a5e512c0528..00000000000
--- a/src/org/apache/commons/compress/compressors/lzw/LZWInputStream.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.lzw;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteOrder;
-
-import org.apache.commons.compress.MemoryLimitException;
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.BitInputStream;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * <p>Generic LZW implementation. It is used internally for
- * the Z decompressor and the Unshrinking Zip file compression method,
- * but may be useful for third-party projects in implementing their own LZW variations.
- *
- * @NotThreadSafe
- * @since 1.10
- */
-public abstract class LZWInputStream extends CompressorInputStream implements InputStreamStatistics {
- protected static final int DEFAULT_CODE_SIZE = 9;
- protected static final int UNUSED_PREFIX = -1;
-
- private final byte[] oneByte = new byte[1];
-
- protected final BitInputStream in;
- private int clearCode = -1;
- private int codeSize = DEFAULT_CODE_SIZE;
- private byte previousCodeFirstChar;
- private int previousCode = UNUSED_PREFIX;
- private int tableSize;
- private int[] prefixes;
- private byte[] characters;
- private byte[] outputStack;
- private int outputStackLocation;
-
- protected LZWInputStream(final InputStream inputStream, final ByteOrder byteOrder) {
- this.in = new BitInputStream(inputStream, byteOrder);
- }
-
- @Override
- public void close() throws IOException {
- in.close();
- }
-
- @Override
- public int read() throws IOException {
- final int ret = read(oneByte);
- if (ret < 0) {
- return ret;
- }
- return 0xff & oneByte[0];
- }
-
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- int bytesRead = readFromStack(b, off, len);
- while (len - bytesRead > 0) {
- final int result = decompressNextSymbol();
- if (result < 0) {
- if (bytesRead > 0) {
- count(bytesRead);
- return bytesRead;
- }
- return result;
- }
- bytesRead += readFromStack(b, off + bytesRead, len - bytesRead);
- }
- count(bytesRead);
- return bytesRead;
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return in.getBytesRead();
- }
-
- /**
- * Read the next code and expand it.
- * @return the expanded next code
- * @throws IOException on error
- */
- protected abstract int decompressNextSymbol() throws IOException;
-
- /**
- * Add a new entry to the dictionary.
- * @param previousCode the previous code
- * @param character the next character to append
- * @return the new code
- * @throws IOException on error
- */
- protected abstract int addEntry(int previousCode, byte character)
- throws IOException;
-
- /**
- * Sets the clear code based on the code size.
- * @param codeSize code size
- */
- protected void setClearCode(final int codeSize) {
- clearCode = (1 << (codeSize - 1));
- }
-
- /**
- * Initializes the arrays based on the maximum code size.
- * First checks that the estimated memory usage is below memoryLimitInKb
- *
- * @param maxCodeSize maximum code size
- * @param memoryLimitInKb maximum allowed estimated memory usage in Kb
- * @throws MemoryLimitException if estimated memory usage is greater than memoryLimitInKb
- */
- protected void initializeTables(final int maxCodeSize, final int memoryLimitInKb)
- throws MemoryLimitException {
-
- if (memoryLimitInKb > -1) {
- final int maxTableSize = 1 << maxCodeSize;
- //account for potential overflow
- long memoryUsageInBytes = (long) maxTableSize * 6; // 4 (prefixes) + 1 (characters) + 1 (outputStack)
- long memoryUsageInKb = memoryUsageInBytes >> 10;
-
- if (memoryUsageInKb > memoryLimitInKb) {
- throw new MemoryLimitException(memoryUsageInKb, memoryLimitInKb);
- }
- }
- initializeTables(maxCodeSize);
- }
-
- /**
- * Initializes the arrays based on the maximum code size.
- * @param maxCodeSize maximum code size
- */
- protected void initializeTables(final int maxCodeSize) {
- final int maxTableSize = 1 << maxCodeSize;
- prefixes = new int[maxTableSize];
- characters = new byte[maxTableSize];
- outputStack = new byte[maxTableSize];
- outputStackLocation = maxTableSize;
- final int max = 1 << 8;
- for (int i = 0; i < max; i++) {
- prefixes[i] = -1;
- characters[i] = (byte) i;
- }
- }
-
- /**
- * Reads the next code from the stream.
- * @return the next code
- * @throws IOException on error
- */
- protected int readNextCode() throws IOException {
- if (codeSize > 31) {
- throw new IllegalArgumentException("code size must not be bigger than 31");
- }
- return (int) in.readBits(codeSize);
- }
-
- /**
- * Adds a new entry if the maximum table size hasn't been exceeded
- * and returns the new index.
- * @param previousCode the previous code
- * @param character the character to append
- * @param maxTableSize the maximum table size
- * @return the new code
- */
- protected int addEntry(final int previousCode, final byte character, final int maxTableSize) {
- if (tableSize < maxTableSize) {
- prefixes[tableSize] = previousCode;
- characters[tableSize] = character;
- return tableSize++;
- }
- return -1;
- }
-
- /**
- * Adds an entry for a repeat of the previous code that hasn't been added yet.
- * @return new code for a repeat of the previous code
- * @throws IOException on error
- */
- protected int addRepeatOfPreviousCode() throws IOException {
- if (previousCode == -1) {
- // can't have a repeat for the very first code
- throw new IOException("The first code can't be a reference to its preceding code");
- }
- return addEntry(previousCode, previousCodeFirstChar);
- }
-
- /**
- * Expands the entry with index code to the output stack and may
- * create a new entry.
- * @param code the code
- * @param addedUnfinishedEntry whether unfinished entries have been added
- * @return the new location of the output stack
- * @throws IOException on error
- */
- protected int expandCodeToOutputStack(final int code, final boolean addedUnfinishedEntry)
- throws IOException {
- for (int entry = code; entry >= 0; entry = prefixes[entry]) {
- outputStack[--outputStackLocation] = characters[entry];
- }
- if (previousCode != -1 && !addedUnfinishedEntry) {
- addEntry(previousCode, outputStack[outputStackLocation]);
- }
- previousCode = code;
- previousCodeFirstChar = outputStack[outputStackLocation];
- return outputStackLocation;
- }
-
- private int readFromStack(final byte[] b, final int off, final int len) {
- final int remainingInStack = outputStack.length - outputStackLocation;
- if (remainingInStack > 0) {
- final int maxLength = Math.min(remainingInStack, len);
- System.arraycopy(outputStack, outputStackLocation, b, off, maxLength);
- outputStackLocation += maxLength;
- return maxLength;
- }
- return 0;
- }
-
- protected int getCodeSize() {
- return codeSize;
- }
-
- protected void resetCodeSize() {
- setCodeSize(DEFAULT_CODE_SIZE);
- }
-
- protected void setCodeSize(final int cs) {
- this.codeSize = cs;
- }
-
- protected void incrementCodeSize() {
- codeSize++;
- }
-
- protected void resetPreviousCode() {
- this.previousCode = -1;
- }
-
- protected int getPrefix(final int offset) {
- return prefixes[offset];
- }
-
- protected void setPrefix(final int offset, final int value) {
- prefixes[offset] = value;
- }
-
- protected int getPrefixesLength() {
- return prefixes.length;
- }
-
- protected int getClearCode() {
- return clearCode;
- }
-
- protected int getTableSize() {
- return tableSize;
- }
-
- protected void setTableSize(final int newSize) {
- tableSize = newSize;
- }
-
-}
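The memory-limit check in initializeTables above reduces to six bytes per table slot (a four-byte int in prefixes plus one byte each in characters and outputStack); a standalone sketch of the same estimate (class name made up):

public class LzwMemoryEstimate {
    // Mirrors LZWInputStream#initializeTables: 6 bytes per slot, shifted
    // right by 10 to convert bytes to kilobytes.
    static long estimatedKb(int maxCodeSize) {
        long maxTableSize = 1L << maxCodeSize;
        return (maxTableSize * 6) >> 10;
    }

    public static void main(String[] args) {
        System.out.println(estimatedKb(16)); // 384 KB for 16-bit codes
        System.out.println(estimatedKb(24)); // 98304 KB (96 MB) for 24-bit codes
    }
}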
diff --git a/src/org/apache/commons/compress/compressors/lzw/package.html b/src/org/apache/commons/compress/compressors/lzw/package.html
deleted file mode 100644
index 3c45ca62a0d..00000000000
--- a/src/org/apache/commons/compress/compressors/lzw/package.html
+++ /dev/null
@@ -1,23 +0,0 @@
-<html>
-<body>
- <p>Generic LZW implementation.</p>
-</body>
-</html>
diff --git a/src/org/apache/commons/compress/compressors/pack200/InMemoryCachingStreamBridge.java b/src/org/apache/commons/compress/compressors/pack200/InMemoryCachingStreamBridge.java
deleted file mode 100644
index e1fdc2cbac6..00000000000
--- a/src/org/apache/commons/compress/compressors/pack200/InMemoryCachingStreamBridge.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.pack200;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * StreamSwitcher that caches all data written to the output side in
- * memory.
- * @since 1.3
- */
-class InMemoryCachingStreamBridge extends StreamBridge {
- InMemoryCachingStreamBridge() {
- super(new ByteArrayOutputStream());
- }
-
- @Override
- InputStream getInputView() throws IOException {
- return new ByteArrayInputStream(((ByteArrayOutputStream) out)
- .toByteArray());
- }
-}
\ No newline at end of file
diff --git a/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorInputStream.java b/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorInputStream.java
deleted file mode 100644
index 04fdc2bb6f7..00000000000
--- a/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorInputStream.java
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.pack200;
-
-import java.io.File;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Map;
-import java.util.jar.JarOutputStream;
-import java.util.jar.Pack200;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-
-/**
- * An input stream that decompresses from the Pack200 format to be read
- * as any other stream.
- *
- * <p>The {@link CompressorInputStream#getCount getCount} and {@link
- * CompressorInputStream#getBytesRead getBytesRead} methods always
- * return 0.
- *
- * @NotThreadSafe
- * @since 1.3
- */
-public class Pack200CompressorInputStream extends CompressorInputStream {
- private final InputStream originalInput;
- private final StreamBridge streamBridge;
-
- /**
- * Decompresses the given stream, caching the decompressed data in
- * memory.
- *
- * <p>When reading from a file the File-arg constructor may
- * provide better performance.
- *
- * @param in the InputStream from which this object should be created
- * @throws IOException if reading fails
- */
- public Pack200CompressorInputStream(final InputStream in)
- throws IOException {
- this(in, Pack200Strategy.IN_MEMORY);
- }
-
- /**
- * Decompresses the given stream using the given strategy to cache
- * the results.
- *
- * <p>When reading from a file the File-arg constructor may
- * provide better performance.
- *
- * @param in the InputStream from which this object should be created
- * @param mode the strategy to use
- * @throws IOException if reading fails
- */
- public Pack200CompressorInputStream(final InputStream in,
- final Pack200Strategy mode)
- throws IOException {
- this(in, null, mode, null);
- }
-
- /**
- * Decompresses the given stream, caching the decompressed data in
- * memory and using the given properties.
- *
- * <p>When reading from a file the File-arg constructor may
- * provide better performance.
- *
- * @param in the InputStream from which this object should be created
- * @param props Pack200 properties to use
- * @throws IOException if reading fails
- */
- public Pack200CompressorInputStream(final InputStream in,
- final Map<String, String> props)
- throws IOException {
- this(in, Pack200Strategy.IN_MEMORY, props);
- }
-
- /**
- * Decompresses the given stream using the given strategy to cache
- * the results and the given properties.
- *
- * <p>When reading from a file the File-arg constructor may
- * provide better performance.
- *
- * @param in the InputStream from which this object should be created
- * @param mode the strategy to use
- * @param props Pack200 properties to use
- * @throws IOException if reading fails
- */
- public Pack200CompressorInputStream(final InputStream in,
- final Pack200Strategy mode,
- final Map<String, String> props)
- throws IOException {
- this(in, null, mode, props);
- }
-
- /**
- * Decompresses the given file, caching the decompressed data in
- * memory.
- *
- * @param f the file to decompress
- * @throws IOException if reading fails
- */
- public Pack200CompressorInputStream(final File f) throws IOException {
- this(f, Pack200Strategy.IN_MEMORY);
- }
-
- /**
- * Decompresses the given file using the given strategy to cache
- * the results.
- *
- * @param f the file to decompress
- * @param mode the strategy to use
- * @throws IOException if reading fails
- */
- public Pack200CompressorInputStream(final File f, final Pack200Strategy mode)
- throws IOException {
- this(null, f, mode, null);
- }
-
- /**
- * Decompresses the given file, caching the decompressed data in
- * memory and using the given properties.
- *
- * @param f the file to decompress
- * @param props Pack200 properties to use
- * @throws IOException if reading fails
- */
- public Pack200CompressorInputStream(final File f,
- final Map<String, String> props)
- throws IOException {
- this(f, Pack200Strategy.IN_MEMORY, props);
- }
-
- /**
- * Decompresses the given file using the given strategy to cache
- * the results and the given properties.
- *
- * @param f the file to decompress
- * @param mode the strategy to use
- * @param props Pack200 properties to use
- * @throws IOException if reading fails
- */
- public Pack200CompressorInputStream(final File f, final Pack200Strategy mode,
- final Map<String, String> props)
- throws IOException {
- this(null, f, mode, props);
- }
-
- private Pack200CompressorInputStream(final InputStream in, final File f,
- final Pack200Strategy mode,
- final Map<String, String> props)
- throws IOException {
- originalInput = in;
- streamBridge = mode.newStreamBridge();
- try (final JarOutputStream jarOut = new JarOutputStream(streamBridge)) {
- final Pack200.Unpacker u = Pack200.newUnpacker();
- if (props != null) {
- u.properties().putAll(props);
- }
- if (f == null) {
- u.unpack(new FilterInputStream(in) {
- @Override
- public void close() {
- // unpack would close this stream but we
- // want to give the user code more control
- }
- }, jarOut);
- } else {
- u.unpack(f, jarOut);
- }
- }
- }
-
- @Override
- public int read() throws IOException {
- return streamBridge.getInput().read();
- }
-
- @Override
- public int read(final byte[] b) throws IOException {
- return streamBridge.getInput().read(b);
- }
-
- @Override
- public int read(final byte[] b, final int off, final int count) throws IOException {
- return streamBridge.getInput().read(b, off, count);
- }
-
- @Override
- public int available() throws IOException {
- return streamBridge.getInput().available();
- }
-
- @Override
- public boolean markSupported() {
- try {
- return streamBridge.getInput().markSupported();
- } catch (final IOException ex) {
- return false;
- }
- }
-
- @Override
- public void mark(final int limit) {
- try {
- streamBridge.getInput().mark(limit);
- } catch (final IOException ex) {
- throw new RuntimeException(ex); //NOSONAR
- }
- }
-
- @Override
- public void reset() throws IOException {
- streamBridge.getInput().reset();
- }
-
- @Override
- public long skip(final long count) throws IOException {
- return IOUtils.skip(streamBridge.getInput(), count);
- }
-
- @Override
- public void close() throws IOException {
- try {
- streamBridge.stop();
- } finally {
- if (originalInput != null) {
- originalInput.close();
- }
- }
- }
-
- private static final byte[] CAFE_DOOD = new byte[] {
- (byte) 0xCA, (byte) 0xFE, (byte) 0xD0, (byte) 0x0D
- };
- private static final int SIG_LENGTH = CAFE_DOOD.length;
-
- /**
- * Checks if the signature matches what is expected for a pack200
- * file (0xCAFED00D).
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is a pack200 compressed stream,
- * false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
- if (length < SIG_LENGTH) {
- return false;
- }
-
- for (int i = 0; i < SIG_LENGTH; i++) {
- if (signature[i] != CAFE_DOOD[i]) {
- return false;
- }
- }
-
- return true;
- }
-}
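With the vendored copy deleted, this API is presumably now served by the Ivy-resolved commons-compress jar. A minimal usage sketch of the stream removed above (file names are hypothetical, and java.util.jar.Pack200 exists only up to Java 13):

```java
// Hypothetical file names; requires Java 13 or earlier, since
// java.util.jar.Pack200 was removed from the JDK in Java 14.
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.pack200.Pack200CompressorInputStream;
import org.apache.commons.compress.compressors.pack200.Pack200Strategy;
import org.apache.commons.compress.utils.IOUtils;

public class Pack200ReadSketch {
    public static void main(String[] args) throws Exception {
        try (InputStream in = Files.newInputStream(Paths.get("archive.pack"));
             // TEMP_FILE keeps the unpacked JAR out of the heap
             Pack200CompressorInputStream p200 =
                 new Pack200CompressorInputStream(in, Pack200Strategy.TEMP_FILE);
             OutputStream out = Files.newOutputStream(Paths.get("archive.jar"))) {
            IOUtils.copy(p200, out);
        }
    }
}
```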
diff --git a/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorOutputStream.java b/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorOutputStream.java
deleted file mode 100644
index ff43a94a98a..00000000000
--- a/src/org/apache/commons/compress/compressors/pack200/Pack200CompressorOutputStream.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.pack200;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Map;
-import java.util.jar.JarInputStream;
-import java.util.jar.Pack200;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-
-/**
- * An output stream that compresses using the Pack200 format.
- *
- * @NotThreadSafe
- * @since 1.3
- */
-public class Pack200CompressorOutputStream extends CompressorOutputStream {
- private boolean finished = false;
- private final OutputStream originalOutput;
- private final StreamBridge streamBridge;
- private final Map<String, String> properties;
-
- /**
- * Compresses the given stream, caching the compressed data in
- * memory.
- *
- * @param out the stream to write to
- * @throws IOException if writing fails
- */
- public Pack200CompressorOutputStream(final OutputStream out)
- throws IOException {
- this(out, Pack200Strategy.IN_MEMORY);
- }
-
- /**
- * Compresses the given stream using the given strategy to cache
- * the results.
- *
- * @param out the stream to write to
- * @param mode the strategy to use
- * @throws IOException if writing fails
- */
- public Pack200CompressorOutputStream(final OutputStream out,
- final Pack200Strategy mode)
- throws IOException {
- this(out, mode, null);
- }
-
- /**
- * Compresses the given stream, caching the compressed data in
- * memory and using the given properties.
- *
- * @param out the stream to write to
- * @param props Pack200 properties to use
- * @throws IOException if writing fails
- */
- public Pack200CompressorOutputStream(final OutputStream out,
- final Map<String, String> props)
- throws IOException {
- this(out, Pack200Strategy.IN_MEMORY, props);
- }
-
- /**
- * Compresses the given stream using the given strategy to cache
- * the results and the given properties.
- *
- * @param out the stream to write to
- * @param mode the strategy to use
- * @param props Pack200 properties to use
- * @throws IOException if writing fails
- */
- public Pack200CompressorOutputStream(final OutputStream out,
- final Pack200Strategy mode,
- final Map<String, String> props)
- throws IOException {
- originalOutput = out;
- streamBridge = mode.newStreamBridge();
- properties = props;
- }
-
- @Override
- public void write(final int b) throws IOException {
- streamBridge.write(b);
- }
-
- @Override
- public void write(final byte[] b) throws IOException {
- streamBridge.write(b);
- }
-
- @Override
- public void write(final byte[] b, final int from, final int length) throws IOException {
- streamBridge.write(b, from, length);
- }
-
- @Override
- public void close() throws IOException {
- finish();
- try {
- streamBridge.stop();
- } finally {
- originalOutput.close();
- }
- }
-
- public void finish() throws IOException {
- if (!finished) {
- finished = true;
- final Pack200.Packer p = Pack200.newPacker();
- if (properties != null) {
- p.properties().putAll(properties);
- }
- try (JarInputStream ji = new JarInputStream(streamBridge.getInput())) {
- p.pack(ji, originalOutput);
- }
- }
- }
-}
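A matching sketch for the output side removed above; as the class caches everything, nothing reaches the target stream until finish() or close() runs the packer (file names hypothetical):

```java
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.pack200.Pack200CompressorOutputStream;
import org.apache.commons.compress.utils.IOUtils;

public class Pack200WriteSketch {
    public static void main(String[] args) throws Exception {
        try (InputStream jar = Files.newInputStream(Paths.get("archive.jar"));
             OutputStream target = Files.newOutputStream(Paths.get("archive.pack"));
             Pack200CompressorOutputStream p200 =
                 new Pack200CompressorOutputStream(target)) {
            // everything is cached; the pack200 data is emitted on close()
            IOUtils.copy(jar, p200);
        }
    }
}
```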
diff --git a/src/org/apache/commons/compress/compressors/pack200/Pack200Strategy.java b/src/org/apache/commons/compress/compressors/pack200/Pack200Strategy.java
deleted file mode 100644
index dba19929662..00000000000
--- a/src/org/apache/commons/compress/compressors/pack200/Pack200Strategy.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.pack200;
-
-import java.io.IOException;
-
-/**
- * The different modes the Pack200 streams can use to wrap input and
- * output.
- * @since 1.3
- */
-public enum Pack200Strategy {
- /** Cache output in memory */
- IN_MEMORY() {
- @Override
- StreamBridge newStreamBridge() {
- return new InMemoryCachingStreamBridge();
- }
- },
- /** Cache output in a temporary file */
- TEMP_FILE() {
- @Override
- StreamBridge newStreamBridge() throws IOException {
- return new TempFileCachingStreamBridge();
- }
- };
-
- abstract StreamBridge newStreamBridge() throws IOException;
-}
\ No newline at end of file
diff --git a/src/org/apache/commons/compress/compressors/pack200/Pack200Utils.java b/src/org/apache/commons/compress/compressors/pack200/Pack200Utils.java
deleted file mode 100644
index 91a54ca5f93..00000000000
--- a/src/org/apache/commons/compress/compressors/pack200/Pack200Utils.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.pack200;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.jar.JarFile;
-import java.util.jar.JarOutputStream;
-import java.util.jar.Pack200;
-
-/**
- * Utility methods for Pack200.
- *
- * @ThreadSafe
- * @since 1.3
- */
-public class Pack200Utils {
- private Pack200Utils() { }
-
- /**
- * Normalizes a JAR archive in-place so it can be safely signed
- * and packed.
- *
- * <p>As stated in Pack200.Packer's
- * javadocs applying a Pack200 compression to a JAR archive will
- * in general make its signatures invalid. In order to prepare a
- * JAR for signing it should be "normalized" by packing and
- * unpacking it. This is what this method does.
- *
- * <p>Note this method implicitly sets the segment limit to
- * -1.
- *
- * @param jar the JAR archive to normalize
- * @throws IOException if reading or writing fails
- */
- public static void normalize(final File jar)
- throws IOException {
- normalize(jar, jar, null);
- }
-
- /**
- * Normalizes a JAR archive in-place so it can be safely signed
- * and packed.
- *
- * <p>As stated in Pack200.Packer's
- * javadocs applying a Pack200 compression to a JAR archive will
- * in general make its signatures invalid. In order to prepare a
- * JAR for signing it should be "normalized" by packing and
- * unpacking it. This is what this method does.
- *
- * @param jar the JAR archive to normalize
- * @param props properties to set for the pack operation. This
- * method will implicitly set the segment limit to -1.
- * @throws IOException if reading or writing fails
- */
- public static void normalize(final File jar, final Map<String, String> props)
- throws IOException {
- normalize(jar, jar, props);
- }
-
- /**
- * Normalizes a JAR archive so it can be safely signed and packed.
- *
- * <p>As stated in Pack200.Packer's
- * javadocs applying a Pack200 compression to a JAR archive will
- * in general make its signatures invalid. In order to prepare a
- * JAR for signing it should be "normalized" by packing and
- * unpacking it. This is what this method does.
- *
- * <p>This method does not replace the existing archive but creates
- * a new one.
- *
- * <p>Note this method implicitly sets the segment limit to
- * -1.
- *
- * @param from the JAR archive to normalize
- * @param to the normalized archive
- * @throws IOException if reading or writing fails
- */
- public static void normalize(final File from, final File to)
- throws IOException {
- normalize(from, to, null);
- }
-
- /**
- * Normalizes a JAR archive so it can be safely signed and packed.
- *
- * <p>As stated in Pack200.Packer's
- * javadocs applying a Pack200 compression to a JAR archive will
- * in general make its signatures invalid. In order to prepare a
- * JAR for signing it should be "normalized" by packing and
- * unpacking it. This is what this method does.
- *
- * <p>This method does not replace the existing archive but creates
- * a new one.
- *
- * @param from the JAR archive to normalize
- * @param to the normalized archive
- * @param props properties to set for the pack operation. This
- * method will implicitly set the segment limit to -1.
- * @throws IOException if reading or writing fails
- */
- public static void normalize(final File from, final File to, Map<String, String> props)
- throws IOException {
- if (props == null) {
- props = new HashMap<>();
- }
- props.put(Pack200.Packer.SEGMENT_LIMIT, "-1");
- final File tempFile = File.createTempFile("commons-compress", "pack200normalize");
- try {
- try (FileOutputStream fos = new FileOutputStream(tempFile);
- JarFile jarFile = new JarFile(from)) {
- final Pack200.Packer packer = Pack200.newPacker();
- packer.properties().putAll(props);
- packer.pack(jarFile, fos);
- }
- final Pack200.Unpacker unpacker = Pack200.newUnpacker();
- try (JarOutputStream jos = new JarOutputStream(new FileOutputStream(to))) {
- unpacker.unpack(tempFile, jos);
- }
- } finally {
- if (!tempFile.delete()) {
- tempFile.deleteOnExit();
- }
- }
- }
-}
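A short sketch of the normalization workflow this utility documents: pack and unpack once before signing, so that a later Pack200 round trip cannot invalidate the signature (file names hypothetical; Pack200.Packer.EFFORT is just one example property):

```java
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.Pack200;

import org.apache.commons.compress.compressors.pack200.Pack200Utils;

public class NormalizeSketch {
    public static void main(String[] args) throws Exception {
        // in-place normalization with default properties
        Pack200Utils.normalize(new File("plugin.jar"));

        // or into a new file, passing extra packer properties
        Map<String, String> props = new HashMap<>();
        props.put(Pack200.Packer.EFFORT, "9"); // example property only
        Pack200Utils.normalize(new File("plugin.jar"),
                new File("plugin-normalized.jar"), props);
    }
}
```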
diff --git a/src/org/apache/commons/compress/compressors/pack200/StreamBridge.java b/src/org/apache/commons/compress/compressors/pack200/StreamBridge.java
deleted file mode 100644
index 9de3567cea9..00000000000
--- a/src/org/apache/commons/compress/compressors/pack200/StreamBridge.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.pack200;
-
-import java.io.FilterOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-/**
- * Provides an InputStream to read all data written to this
- * OutputStream.
- *
- * @ThreadSafe
- * @since 1.3
- */
-abstract class StreamBridge extends FilterOutputStream {
- private InputStream input;
- private final Object inputLock = new Object();
-
- protected StreamBridge(final OutputStream out) {
- super(out);
- }
-
- protected StreamBridge() {
- this(null);
- }
-
- /**
- * Provides the input view.
- */
- InputStream getInput() throws IOException {
- synchronized (inputLock) {
- if (input == null) {
- input = getInputView();
- }
- }
- return input;
- }
-
- /**
- * Creates the input view.
- */
- abstract InputStream getInputView() throws IOException;
-
- /**
- * Closes input and output and releases all associated resources.
- */
- void stop() throws IOException {
- close();
- synchronized (inputLock) {
- if (input != null) {
- input.close();
- input = null;
- }
- }
- }
-}
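For illustration, a minimal standalone sketch of the in-memory variant of this bridge, mirroring what InMemoryCachingStreamBridge (referenced from Pack200Strategy above but not shown in this diff) does: buffer all writes, then expose them as an InputStream once the writer is finished:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;

class InMemoryBridgeSketch extends FilterOutputStream {
    InMemoryBridgeSketch() {
        super(new ByteArrayOutputStream());
    }

    // valid once the writing side is done; serves the cached bytes
    InputStream getInputView() throws IOException {
        out.close();
        return new ByteArrayInputStream(
                ((ByteArrayOutputStream) out).toByteArray());
    }
}
```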
diff --git a/src/org/apache/commons/compress/compressors/pack200/TempFileCachingStreamBridge.java b/src/org/apache/commons/compress/compressors/pack200/TempFileCachingStreamBridge.java
deleted file mode 100644
index dc612aa200e..00000000000
--- a/src/org/apache/commons/compress/compressors/pack200/TempFileCachingStreamBridge.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.compressors.pack200;
-
-import java.io.File;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Files;
-
-/**
- * StreamSwitcher that caches all data written to the output side in
- * a temporary file.
- * @since 1.3
- */
-class TempFileCachingStreamBridge extends StreamBridge {
- private final File f;
-
- TempFileCachingStreamBridge() throws IOException {
- f = File.createTempFile("commons-compress", "packtemp");
- f.deleteOnExit();
- out = Files.newOutputStream(f.toPath());
- }
-
- @Override
- InputStream getInputView() throws IOException {
- out.close();
- return new FilterInputStream(Files.newInputStream(f.toPath())) {
- @Override
- public void close() throws IOException {
- super.close();
- f.delete();
- }
- };
- }
-}
diff --git a/src/org/apache/commons/compress/compressors/pack200/package.html b/src/org/apache/commons/compress/compressors/pack200/package.html
deleted file mode 100644
index 9dbf2a065e1..00000000000
--- a/src/org/apache/commons/compress/compressors/pack200/package.html
+++ /dev/null
@@ -1,82 +0,0 @@
-
-
-
- <p>Provides stream classes for compressing and decompressing
- streams using the Pack200 algorithm used to compress Java
- archives.
-
- <p>The streams of this package only work on JAR archives, i.e. a
- {@link
- org.apache.commons.compress.compressors.pack200.Pack200CompressorOutputStream
- Pack200CompressorOutputStream} expects to be wrapped around a
- stream that a valid JAR archive will be written to and a {@link
- org.apache.commons.compress.compressors.pack200.Pack200CompressorInputStream
- Pack200CompressorInputStream} provides a stream to read from a
- JAR archive.
-
- <p>JAR archives compressed with Pack200 will in general be
- different from the original archive when decompressed again.
- For details see
- the API
- documentation of Pack200.
-
- <p>The streams of this package work on non-deflated streams,
- i.e. archives like those created with the --no-gzip
- option of the JDK's pack200 command line tool. If
- you want to work on deflated streams you must use an additional
- stream layer - for example by using Apache Commons Compress'
- gzip package.
-
- <p>The Pack200 API provided by the Java class library doesn't lend
- itself to real stream
- processing. Pack200CompressorInputStream will
- uncompress its input immediately and then provide
- an InputStream to a cached result.
- Likewise Pack200CompressorOutputStream will not
- write anything to the given OutputStream
- until finish or close is called - at
- which point the cached output written so far gets
- compressed.
-
- <p>Two different caching modes are available - "in memory", which
- is the default, and "temporary file". By default data is cached
- in memory but you should switch to the temporary file option if
- your archives are really big.
-
- <p>Given there always is an intermediate result
- the getBytesRead and getCount methods
- of Pack200CompressorInputStream are meaningless
- (read from the real stream or from the intermediate result?)
- and always return 0.
-
- <p>During development of the initial version several attempts have
- been made to use a real streaming API based for example
- on Piped(In|Out)putStream or explicit stream
- pumping like Commons Exec's InputStreamPumper but
- they have all failed because they rely on the output end to be
- consumed completely or else the (un)pack will block
- forever. Especially for Pack200InputStream it is
- very likely that it will be wrapped in
- a ZipArchiveInputStream which will never read the
- archive completely as it is not interested in the ZIP central
- directory data at the end of the JAR archive.
- <p>Provides a unified API and factories for dealing with
- compressed streams.
-
-
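The "non-deflated streams" paragraph above implies one extra stream layer for the common ".pack.gz" case; a hedged sketch (hypothetical file name):

```java
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.apache.commons.compress.compressors.pack200.Pack200CompressorInputStream;

public class PackGzSketch {
    public static void main(String[] args) throws Exception {
        // ".pack.gz" is gzip on the outside, pack200 on the inside
        try (InputStream fin = Files.newInputStream(Paths.get("archive.pack.gz"));
             GzipCompressorInputStream gz = new GzipCompressorInputStream(fin);
             Pack200CompressorInputStream p200 = new Pack200CompressorInputStream(gz)) {
            // p200 now yields the bytes of the restored JAR
        }
    }
}
```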
diff --git a/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorInputStream.java b/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorInputStream.java
deleted file mode 100644
index 12974ab5597..00000000000
--- a/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorInputStream.java
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.snappy;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PushbackInputStream;
-import java.util.Arrays;
-
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.BoundedInputStream;
-import org.apache.commons.compress.utils.ByteUtils;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * CompressorInputStream for the framing Snappy format.
- *
- * <p>Based on the "spec" in the version "Last revised: 2013-10-25"
- *
- * @see Snappy framing format description
- * @since 1.7
- */
-public class FramedSnappyCompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
-
- /**
- * package private for tests only.
- */
- static final long MASK_OFFSET = 0xa282ead8L;
-
- private static final int STREAM_IDENTIFIER_TYPE = 0xff;
- static final int COMPRESSED_CHUNK_TYPE = 0;
- private static final int UNCOMPRESSED_CHUNK_TYPE = 1;
- private static final int PADDING_CHUNK_TYPE = 0xfe;
- private static final int MIN_UNSKIPPABLE_TYPE = 2;
- private static final int MAX_UNSKIPPABLE_TYPE = 0x7f;
- private static final int MAX_SKIPPABLE_TYPE = 0xfd;
-
- // used by FramedSnappyCompressorOutputStream as well
- static final byte[] SZ_SIGNATURE = new byte[] { //NOSONAR
- (byte) STREAM_IDENTIFIER_TYPE, // tag
- 6, 0, 0, // length
- 's', 'N', 'a', 'P', 'p', 'Y'
- };
-
- private long unreadBytes;
- private final CountingInputStream countingStream;
-
- /** The underlying stream to read compressed data from */
- private final PushbackInputStream in;
-
- /** The dialect to expect */
- private final FramedSnappyDialect dialect;
-
- private SnappyCompressorInputStream currentCompressedChunk;
-
- // used in no-arg read method
- private final byte[] oneByte = new byte[1];
-
- private boolean endReached, inUncompressedChunk;
-
- private int uncompressedBytesRemaining;
- private long expectedChecksum = -1;
- private final int blockSize;
- private final PureJavaCrc32C checksum = new PureJavaCrc32C();
-
- private final ByteUtils.ByteSupplier supplier = new ByteUtils.ByteSupplier() {
- @Override
- public int getAsByte() throws IOException {
- return readOneByte();
- }
- };
-
- /**
- * Constructs a new input stream that decompresses
- * snappy-framed-compressed data from the specified input stream
- * using the {@link FramedSnappyDialect#STANDARD} dialect.
- * @param in the InputStream from which to read the compressed data
- * @throws IOException if reading fails
- */
- public FramedSnappyCompressorInputStream(final InputStream in) throws IOException {
- this(in, FramedSnappyDialect.STANDARD);
- }
-
- /**
- * Constructs a new input stream that decompresses snappy-framed-compressed data
- * from the specified input stream.
- * @param in the InputStream from which to read the compressed data
- * @param dialect the dialect used by the compressed stream
- * @throws IOException if reading fails
- */
- public FramedSnappyCompressorInputStream(final InputStream in,
- final FramedSnappyDialect dialect)
- throws IOException {
- this(in, SnappyCompressorInputStream.DEFAULT_BLOCK_SIZE, dialect);
- }
-
- /**
- * Constructs a new input stream that decompresses snappy-framed-compressed data
- * from the specified input stream.
- * @param in the InputStream from which to read the compressed data
- * @param blockSize the block size to use for the compressed stream
- * @param dialect the dialect used by the compressed stream
- * @throws IOException if reading fails
- * @since 1.14
- */
- public FramedSnappyCompressorInputStream(final InputStream in,
- final int blockSize,
- final FramedSnappyDialect dialect)
- throws IOException {
- countingStream = new CountingInputStream(in);
- this.in = new PushbackInputStream(countingStream, 1);
- this.blockSize = blockSize;
- this.dialect = dialect;
- if (dialect.hasStreamIdentifier()) {
- readStreamIdentifier();
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public int read() throws IOException {
- return read(oneByte, 0, 1) == -1 ? -1 : oneByte[0] & 0xFF;
- }
-
- /** {@inheritDoc} */
- @Override
- public void close() throws IOException {
- if (currentCompressedChunk != null) {
- currentCompressedChunk.close();
- currentCompressedChunk = null;
- }
- in.close();
- }
-
- /** {@inheritDoc} */
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- int read = readOnce(b, off, len);
- if (read == -1) {
- readNextBlock();
- if (endReached) {
- return -1;
- }
- read = readOnce(b, off, len);
- }
- return read;
- }
-
- /** {@inheritDoc} */
- @Override
- public int available() throws IOException {
- if (inUncompressedChunk) {
- return Math.min(uncompressedBytesRemaining,
- in.available());
- } else if (currentCompressedChunk != null) {
- return currentCompressedChunk.available();
- }
- return 0;
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return countingStream.getBytesRead() - unreadBytes;
- }
-
- /**
- * Read from the current chunk into the given array.
- *
- * @return -1 if there is no current chunk or the number of bytes
- * read from the current chunk (which may be -1 if the end of the
- * chunk is reached).
- */
- private int readOnce(final byte[] b, final int off, final int len) throws IOException {
- int read = -1;
- if (inUncompressedChunk) {
- final int amount = Math.min(uncompressedBytesRemaining, len);
- if (amount == 0) {
- return -1;
- }
- read = in.read(b, off, amount);
- if (read != -1) {
- uncompressedBytesRemaining -= read;
- count(read);
- }
- } else if (currentCompressedChunk != null) {
- final long before = currentCompressedChunk.getBytesRead();
- read = currentCompressedChunk.read(b, off, len);
- if (read == -1) {
- currentCompressedChunk.close();
- currentCompressedChunk = null;
- } else {
- count(currentCompressedChunk.getBytesRead() - before);
- }
- }
- if (read > 0) {
- checksum.update(b, off, read);
- }
- return read;
- }
-
- private void readNextBlock() throws IOException {
- verifyLastChecksumAndReset();
- inUncompressedChunk = false;
- final int type = readOneByte();
- if (type == -1) {
- endReached = true;
- } else if (type == STREAM_IDENTIFIER_TYPE) {
- in.unread(type);
- unreadBytes++;
- pushedBackBytes(1);
- readStreamIdentifier();
- readNextBlock();
- } else if (type == PADDING_CHUNK_TYPE
- || (type > MAX_UNSKIPPABLE_TYPE && type <= MAX_SKIPPABLE_TYPE)) {
- skipBlock();
- readNextBlock();
- } else if (type >= MIN_UNSKIPPABLE_TYPE && type <= MAX_UNSKIPPABLE_TYPE) {
- throw new IOException("unskippable chunk with type " + type
- + " (hex " + Integer.toHexString(type) + ")"
- + " detected.");
- } else if (type == UNCOMPRESSED_CHUNK_TYPE) {
- inUncompressedChunk = true;
- uncompressedBytesRemaining = readSize() - 4 /* CRC */;
- expectedChecksum = unmask(readCrc());
- } else if (type == COMPRESSED_CHUNK_TYPE) {
- final boolean expectChecksum = dialect.usesChecksumWithCompressedChunks();
- final long size = readSize() - (expectChecksum ? 4L : 0L);
- if (expectChecksum) {
- expectedChecksum = unmask(readCrc());
- } else {
- expectedChecksum = -1;
- }
- currentCompressedChunk =
- new SnappyCompressorInputStream(new BoundedInputStream(in, size), blockSize);
- // constructor reads uncompressed size
- count(currentCompressedChunk.getBytesRead());
- } else {
- // impossible as all potential byte values have been covered
- throw new IOException("unknown chunk type " + type
- + " detected.");
- }
- }
-
- private long readCrc() throws IOException {
- final byte[] b = new byte[4];
- final int read = IOUtils.readFully(in, b);
- count(read);
- if (read != 4) {
- throw new IOException("premature end of stream");
- }
- return ByteUtils.fromLittleEndian(b);
- }
-
- static long unmask(long x) {
- // ugly, maybe we should just have used ints and deal with the
- // overflow
- x -= MASK_OFFSET;
- x &= 0xffffFFFFL;
- return ((x >> 17) | (x << 15)) & 0xffffFFFFL;
- }
-
- private int readSize() throws IOException {
- return (int) ByteUtils.fromLittleEndian(supplier, 3);
- }
-
- private void skipBlock() throws IOException {
- final int size = readSize();
- final long read = IOUtils.skip(in, size);
- count(read);
- if (read != size) {
- throw new IOException("premature end of stream");
- }
- }
-
- private void readStreamIdentifier() throws IOException {
- final byte[] b = new byte[10];
- final int read = IOUtils.readFully(in, b);
- count(read);
- if (10 != read || !matches(b, 10)) {
- throw new IOException("Not a framed Snappy stream");
- }
- }
-
- private int readOneByte() throws IOException {
- final int b = in.read();
- if (b != -1) {
- count(1);
- return b & 0xFF;
- }
- return -1;
- }
-
- private void verifyLastChecksumAndReset() throws IOException {
- if (expectedChecksum >= 0 && expectedChecksum != checksum.getValue()) {
- throw new IOException("Checksum verification failed");
- }
- expectedChecksum = -1;
- checksum.reset();
- }
-
- /**
- * Checks if the signature matches what is expected for a .sz file.
- *
- * <p>.sz files start with a chunk with tag 0xff and content sNaPpY.
- *
- * @param signature the bytes to check
- * @param length the number of bytes to check
- * @return true if this is a .sz stream, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
-
- if (length < SZ_SIGNATURE.length) {
- return false;
- }
-
- byte[] shortenedSig = signature;
- if (signature.length > SZ_SIGNATURE.length) {
- shortenedSig = new byte[SZ_SIGNATURE.length];
- System.arraycopy(signature, 0, shortenedSig, 0, SZ_SIGNATURE.length);
- }
-
- return Arrays.equals(shortenedSig, SZ_SIGNATURE);
- }
-
-}
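The CRC handling above relies on the framing format's masking scheme: rotate the CRC32-C value right by 15 bits within 32 bits, then add 0xa282ead8. A standalone sketch showing that unmask(), as implemented above, inverts the mask() used by the output stream that follows:

```java
public class SnappyCrcMaskSketch {
    static final long MASK_OFFSET = 0xa282ead8L;

    // rotate right by 15 within 32 bits, then add the offset
    static long mask(long x) {
        x = (x >> 15) | (x << 17);
        x += MASK_OFFSET;
        return x & 0xffffFFFFL;
    }

    // subtract the offset, then rotate left by 15 within 32 bits
    static long unmask(long x) {
        x -= MASK_OFFSET;
        x &= 0xffffFFFFL;
        return ((x >> 17) | (x << 15)) & 0xffffFFFFL;
    }

    public static void main(String[] args) {
        long crc = 0x12345678L;
        System.out.println(unmask(mask(crc)) == crc); // true
    }
}
```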
diff --git a/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorOutputStream.java b/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorOutputStream.java
deleted file mode 100644
index 08cd619010d..00000000000
--- a/src/org/apache/commons/compress/compressors/snappy/FramedSnappyCompressorOutputStream.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.snappy;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-import org.apache.commons.compress.compressors.lz77support.Parameters;
-import org.apache.commons.compress.utils.ByteUtils;
-
-/**
- * CompressorOutputStream for the framing Snappy format.
- *
- * <p>Based on the "spec" in the version "Last revised: 2013-10-25"
- *
- * @see Snappy framing format description
- * @since 1.14
- * @NotThreadSafe
- */
-public class FramedSnappyCompressorOutputStream extends CompressorOutputStream {
- // see spec:
- // > However, we place an additional restriction that the uncompressed data
- // > in a chunk must be no longer than 65536 bytes. This allows consumers to
- // > easily use small fixed-size buffers.
- private static final int MAX_COMPRESSED_BUFFER_SIZE = 1 << 16;
-
- private final OutputStream out;
- private final Parameters params;
- private final PureJavaCrc32C checksum = new PureJavaCrc32C();
- // used in one-arg write method
- private final byte[] oneByte = new byte[1];
- private final byte[] buffer = new byte[MAX_COMPRESSED_BUFFER_SIZE];
- private int currentIndex = 0;
-
- private final ByteUtils.ByteConsumer consumer;
-
- /**
- * Constructs a new output stream that compresses
- * snappy-framed-compressed data to the specified output stream.
- * @param out the OutputStream to which to write the compressed data
- * @throws IOException if writing the signature fails
- */
- public FramedSnappyCompressorOutputStream(final OutputStream out) throws IOException {
- this(out, SnappyCompressorOutputStream.createParameterBuilder(SnappyCompressorInputStream.DEFAULT_BLOCK_SIZE)
- .build());
- }
-
- /**
- * Constructs a new output stream that compresses
- * snappy-framed-compressed data to the specified output stream.
- * @param out the OutputStream to which to write the compressed data
- * @param params parameters used to fine-tune compression, in
- * particular to balance compression ratio vs compression speed.
- * @throws IOException if writing the signature fails
- */
- public FramedSnappyCompressorOutputStream(final OutputStream out, Parameters params) throws IOException {
- this.out = out;
- this.params = params;
- consumer = new ByteUtils.OutputStreamByteConsumer(out);
- out.write(FramedSnappyCompressorInputStream.SZ_SIGNATURE);
- }
-
- @Override
- public void write(int b) throws IOException {
- oneByte[0] = (byte) (b & 0xff);
- write(oneByte);
- }
-
- @Override
- public void write(byte[] data, int off, int len) throws IOException {
- if (currentIndex + len > MAX_COMPRESSED_BUFFER_SIZE) {
- flushBuffer();
- while (len > MAX_COMPRESSED_BUFFER_SIZE) {
- System.arraycopy(data, off, buffer, 0, MAX_COMPRESSED_BUFFER_SIZE);
- off += MAX_COMPRESSED_BUFFER_SIZE;
- len -= MAX_COMPRESSED_BUFFER_SIZE;
- currentIndex = MAX_COMPRESSED_BUFFER_SIZE;
- flushBuffer();
- }
- }
- System.arraycopy(data, off, buffer, currentIndex, len);
- currentIndex += len;
- }
-
- @Override
- public void close() throws IOException {
- finish();
- out.close();
- }
-
- /**
- * Compresses all remaining data and writes it to the stream,
- * doesn't close the underlying stream.
- * @throws IOException if an error occurs
- */
- public void finish() throws IOException {
- if (currentIndex > 0) {
- flushBuffer();
- }
- }
-
- private void flushBuffer() throws IOException {
- out.write(FramedSnappyCompressorInputStream.COMPRESSED_CHUNK_TYPE);
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- try (OutputStream o = new SnappyCompressorOutputStream(baos, currentIndex, params)) {
- o.write(buffer, 0, currentIndex);
- }
- byte[] b = baos.toByteArray();
- writeLittleEndian(3, b.length + 4L /* CRC */);
- writeCrc();
- out.write(b);
- currentIndex = 0;
- }
-
- private void writeLittleEndian(final int numBytes, long num) throws IOException {
- ByteUtils.toLittleEndian(consumer, num, numBytes);
- }
-
- private void writeCrc() throws IOException {
- checksum.update(buffer, 0, currentIndex);
- writeLittleEndian(4, mask(checksum.getValue()));
- checksum.reset();
- }
-
- static long mask(long x) {
- // ugly, maybe we should just have used ints and deal with the
- // overflow
- x = ((x >> 15) | (x << 17));
- x += FramedSnappyCompressorInputStream.MASK_OFFSET;
- x &= 0xffffFFFFL;
- return x;
- }
-}
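A minimal sketch of writing a ".sz" file with the stream removed above; each chunk of at most 64 KiB is compressed and checksummed on flush (file names hypothetical):

```java
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.snappy.FramedSnappyCompressorOutputStream;

public class FramedSnappyWriteSketch {
    public static void main(String[] args) throws Exception {
        byte[] data = Files.readAllBytes(Paths.get("input.bin"));
        try (OutputStream fout = Files.newOutputStream(Paths.get("input.bin.sz"));
             FramedSnappyCompressorOutputStream sz =
                 new FramedSnappyCompressorOutputStream(fout)) {
            sz.write(data);
        } // close() flushes the final chunk and closes the target
    }
}
```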
diff --git a/src/org/apache/commons/compress/compressors/snappy/FramedSnappyDialect.java b/src/org/apache/commons/compress/compressors/snappy/FramedSnappyDialect.java
deleted file mode 100644
index b83b7a50914..00000000000
--- a/src/org/apache/commons/compress/compressors/snappy/FramedSnappyDialect.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.snappy;
-
-/**
- * Dialects of the framing format that {@link FramedSnappyCompressorInputStream} can deal with.
- * @since 1.12
- */
-public enum FramedSnappyDialect {
- /**
- * The standard as defined by the Snappy
- * framing format description
- */
- STANDARD(true, true),
- /**
- * The format used by Apple's iWork Archives (.iwa files).
- */
- IWORK_ARCHIVE(false, false);
-
- private final boolean streamIdentifier, checksumWithCompressedChunks;
-
- FramedSnappyDialect(final boolean hasStreamIdentifier,
- final boolean usesChecksumWithCompressedChunks) {
- this.streamIdentifier = hasStreamIdentifier;
- this.checksumWithCompressedChunks = usesChecksumWithCompressedChunks;
- }
-
- boolean hasStreamIdentifier() {
- return streamIdentifier;
- }
-
- boolean usesChecksumWithCompressedChunks() {
- return checksumWithCompressedChunks;
- }
-}
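A sketch of the non-standard dialect in use: reading the snappy framing inside an iWork ".iwa" file, which carries neither the stream identifier nor per-chunk checksums (path hypothetical):

```java
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.snappy.FramedSnappyCompressorInputStream;
import org.apache.commons.compress.compressors.snappy.FramedSnappyDialect;

public class IwaSketch {
    public static void main(String[] args) throws Exception {
        try (InputStream in = Files.newInputStream(Paths.get("Index/Document.iwa"));
             FramedSnappyCompressorInputStream snappy =
                 new FramedSnappyCompressorInputStream(in, FramedSnappyDialect.IWORK_ARCHIVE)) {
            // read the decompressed payload from 'snappy'
        }
    }
}
```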
diff --git a/src/org/apache/commons/compress/compressors/snappy/PureJavaCrc32C.java b/src/org/apache/commons/compress/compressors/snappy/PureJavaCrc32C.java
deleted file mode 100644
index 4c9738b912b..00000000000
--- a/src/org/apache/commons/compress/compressors/snappy/PureJavaCrc32C.java
+++ /dev/null
@@ -1,638 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * Some portions of this file Copyright (c) 2004-2006 Intel Corporation
- * and licensed under the BSD license.
- */
-package org.apache.commons.compress.compressors.snappy;
-
-import java.util.zip.Checksum;
-
-/**
- * A pure-java implementation of the CRC32 checksum that uses
- * the CRC32-C polynomial, the same polynomial used by iSCSI
- * and implemented on many Intel chipsets supporting SSE4.2.
- *
- * <p>This file is a copy of the implementation at the Apache Hadoop project.
- *
- * <p>This implementation uses an internal buffer in order to handle
- * the back-references that are at the heart of the LZ77 algorithm.
- * The size of the buffer must be at least as big as the biggest
- * offset used in the compressed stream. The current version of the
- * Snappy algorithm as defined by Google works on 32k blocks and
- * doesn't contain offsets bigger than 32k which is the default block
- * size used by this class.
- *
- * @see Snappy compressed format description
- * @since 1.7
- */
-public class SnappyCompressorInputStream extends AbstractLZ77CompressorInputStream {
-
- /** Mask used to determine the type of "tag" is being processed */
- private static final int TAG_MASK = 0x03;
-
- /** Default block size */
- public static final int DEFAULT_BLOCK_SIZE = 32768;
-
- /** The size of the uncompressed data */
- private final int size;
-
- /** Number of uncompressed bytes still to be read. */
- private int uncompressedBytesRemaining;
-
- /** Current state of the stream */
- private State state = State.NO_BLOCK;
-
- private boolean endReached = false;
-
- /**
- * Constructor using the default buffer size of 32k.
- *
- * @param is
- * An InputStream to read compressed data from
- *
- * @throws IOException if reading fails
- */
- public SnappyCompressorInputStream(final InputStream is) throws IOException {
- this(is, DEFAULT_BLOCK_SIZE);
- }
-
- /**
- * Constructor using a configurable buffer size.
- *
- * @param is
- * An InputStream to read compressed data from
- * @param blockSize
- * The block size used in compression
- *
- * @throws IOException if reading fails
- */
- public SnappyCompressorInputStream(final InputStream is, final int blockSize)
- throws IOException {
- super(is, blockSize);
- uncompressedBytesRemaining = size = (int) readSize();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- if (endReached) {
- return -1;
- }
- switch (state) {
- case NO_BLOCK:
- fill();
- return read(b, off, len);
- case IN_LITERAL:
- int litLen = readLiteral(b, off, len);
- if (!hasMoreDataInBlock()) {
- state = State.NO_BLOCK;
- }
- return litLen > 0 ? litLen : read(b, off, len);
- case IN_BACK_REFERENCE:
- int backReferenceLen = readBackReference(b, off, len);
- if (!hasMoreDataInBlock()) {
- state = State.NO_BLOCK;
- }
- return backReferenceLen > 0 ? backReferenceLen : read(b, off, len);
- default:
- throw new IOException("Unknown stream state " + state);
- }
- }
-
- /**
- * Try to fill the buffer with the next block of data.
- */
- private void fill() throws IOException {
- if (uncompressedBytesRemaining == 0) {
- endReached = true;
- return;
- }
-
- int b = readOneByte();
- if (b == -1) {
- throw new IOException("Premature end of stream reading block start");
- }
- int length = 0;
- int offset = 0;
-
- switch (b & TAG_MASK) {
-
- case 0x00:
-
- length = readLiteralLength(b);
- uncompressedBytesRemaining -= length;
- startLiteral(length);
- state = State.IN_LITERAL;
- break;
-
- case 0x01:
-
- /*
- * These elements can encode lengths between [4..11] bytes and
- * offsets between [0..2047] bytes. (len-4) occupies three bits
- * and is stored in bits [2..4] of the tag byte. The offset
- * occupies 11 bits, of which the upper three are stored in the
- * upper three bits ([5..7]) of the tag byte, and the lower
- * eight are stored in a byte following the tag byte.
- */
-
- length = 4 + ((b >> 2) & 0x07);
- uncompressedBytesRemaining -= length;
- offset = (b & 0xE0) << 3;
- b = readOneByte();
- if (b == -1) {
- throw new IOException("Premature end of stream reading back-reference length");
- }
- offset |= b;
-
- startBackReference(offset, length);
- state = State.IN_BACK_REFERENCE;
- break;
-
- case 0x02:
-
- /*
- * These elements can encode lengths between [1..64] and offsets
- * from [0..65535]. (len-1) occupies six bits and is stored in
- * the upper six bits ([2..7]) of the tag byte. The offset is
- * stored as a little-endian 16-bit integer in the two bytes
- * following the tag byte.
- */
-
- length = (b >> 2) + 1;
- uncompressedBytesRemaining -= length;
-
- offset = (int) ByteUtils.fromLittleEndian(supplier, 2);
-
- startBackReference(offset, length);
- state = State.IN_BACK_REFERENCE;
- break;
-
- case 0x03:
-
- /*
- * These are like the copies with 2-byte offsets (see previous
- * subsection), except that the offset is stored as a 32-bit
- * integer instead of a 16-bit integer (and thus will occupy
- * four bytes).
- */
-
- length = (b >> 2) + 1;
- uncompressedBytesRemaining -= length;
-
- offset = (int) ByteUtils.fromLittleEndian(supplier, 4) & 0x7fffffff;
-
- startBackReference(offset, length);
- state = State.IN_BACK_REFERENCE;
- break;
- default:
- // impossible as TAG_MASK is two bits and all four possible cases have been covered
- break;
- }
- }
-
- /*
- * For literals up to and including 60 bytes in length, the
- * upper six bits of the tag byte contain (len-1). The literal
- * follows immediately thereafter in the bytestream. - For
- * longer literals, the (len-1) value is stored after the tag
- * byte, little-endian. The upper six bits of the tag byte
- * describe how many bytes are used for the length; 60, 61, 62
- * or 63 for 1-4 bytes, respectively. The literal itself follows
- * after the length.
- */
- private int readLiteralLength(final int b) throws IOException {
- int length;
- switch (b >> 2) {
- case 60:
- length = readOneByte();
- if (length == -1) {
- throw new IOException("Premature end of stream reading literal length");
- }
- break;
- case 61:
- length = (int) ByteUtils.fromLittleEndian(supplier, 2);
- break;
- case 62:
- length = (int) ByteUtils.fromLittleEndian(supplier, 3);
- break;
- case 63:
- length = (int) ByteUtils.fromLittleEndian(supplier, 4);
- break;
- default:
- length = b >> 2;
- break;
- }
-
- return length + 1;
- }
-
- /**
- * The stream starts with the uncompressed length (up to a maximum of 2^32 -
- * 1), stored as a little-endian varint. Varints consist of a series of
- * bytes, where the lower 7 bits are data and the upper bit is set iff there
- * are more bytes to be read. In other words, an uncompressed length of 64
- * would be stored as 0x40, and an uncompressed length of 2097150 (0x1FFFFE)
- * would be stored as 0xFE 0xFF 0x7F.
- *
- * @return The size of the uncompressed data
- *
- * @throws IOException
- * Could not read a byte
- */
- private long readSize() throws IOException {
- int index = 0;
- long sz = 0;
- int b = 0;
-
- do {
- b = readOneByte();
- if (b == -1) {
- throw new IOException("Premature end of stream reading size");
- }
- sz |= (b & 0x7f) << (index++ * 7);
- } while (0 != (b & 0x80));
- return sz;
- }
-
- /**
- * Get the uncompressed size of the stream
- *
- * @return the uncompressed size
- */
- @Override
- public int getSize() {
- return size;
- }
-
- private enum State {
- NO_BLOCK, IN_LITERAL, IN_BACK_REFERENCE
- }
-}
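The varint described in the readSize() javadoc is easy to get wrong, so here is a self-contained sketch of both directions, mirroring readSize() above and writeUncompressedSize() in the output stream that follows:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

public class VarintSketch {
    // 7 payload bits per byte, high bit set while more bytes follow
    static void write(ByteArrayOutputStream out, long n) {
        boolean more;
        do {
            int b = (int) (n & 0x7f);
            n >>>= 7;
            more = n != 0;
            out.write(more ? b | 0x80 : b);
        } while (more);
    }

    static long read(InputStream in) throws IOException {
        long n = 0;
        int shift = 0;
        int b;
        do {
            b = in.read();
            if (b == -1) {
                throw new IOException("premature end of stream reading size");
            }
            n |= (long) (b & 0x7f) << shift;
            shift += 7;
        } while ((b & 0x80) != 0);
        return n;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        write(out, 2097150); // encodes as 0xFE 0xFF 0x7F, as in the javadoc
        System.out.println(read(new ByteArrayInputStream(out.toByteArray())));
    }
}
```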
diff --git a/src/org/apache/commons/compress/compressors/snappy/SnappyCompressorOutputStream.java b/src/org/apache/commons/compress/compressors/snappy/SnappyCompressorOutputStream.java
deleted file mode 100644
index 93a9d80f9e8..00000000000
--- a/src/org/apache/commons/compress/compressors/snappy/SnappyCompressorOutputStream.java
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.snappy;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-import org.apache.commons.compress.compressors.lz77support.LZ77Compressor;
-import org.apache.commons.compress.compressors.lz77support.Parameters;
-import org.apache.commons.compress.utils.ByteUtils;
-
-/**
- * CompressorOutputStream for the raw Snappy format.
- *
- * <p>This implementation uses an internal buffer in order to handle
- * the back-references that are at the heart of the LZ77 algorithm.
- * The size of the buffer must be at least as big as the biggest
- * offset used in the compressed stream. The current version of the
- * Snappy algorithm as defined by Google works on 32k blocks and
- * doesn't contain offsets bigger than 32k which is the default block
- * size used by this class.
- *
- * <p>The raw Snappy format requires the uncompressed size to be
- * written at the beginning of the stream using a varint
- * representation, i.e. the number of bytes needed to write the
- * information is not known before the uncompressed size is
- * known. We've chosen to make the uncompressedSize a parameter of the
- * constructor in favor of buffering the whole output until the size
- * is known. When using the {@link FramedSnappyCompressorOutputStream}
- * this limitation is taken care of by the wrapping framing
- * format.
- *
- * @see Snappy compressed format description
- * @since 1.14
- * @NotThreadSafe
- */
-public class SnappyCompressorOutputStream extends CompressorOutputStream {
- private final LZ77Compressor compressor;
- private final OutputStream os;
- private final ByteUtils.ByteConsumer consumer;
-
- // used in one-arg write method
- private final byte[] oneByte = new byte[1];
-
- private boolean finished = false;
-
- /**
- * Constructor using the default block size of 32k.
- *
- * @param os the outputstream to write compressed data to
- * @param uncompressedSize the uncompressed size of data
- * @throws IOException if writing of the size fails
- */
- public SnappyCompressorOutputStream(final OutputStream os, final long uncompressedSize) throws IOException {
- this(os, uncompressedSize, SnappyCompressorInputStream.DEFAULT_BLOCK_SIZE);
- }
-
- /**
- * Constructor using a configurable block size.
- *
- * @param os the outputstream to write compressed data to
- * @param uncompressedSize the uncompressed size of data
- * @param blockSize the block size used - must be a power of two
- * @throws IOException if writing of the size fails
- */
- public SnappyCompressorOutputStream(final OutputStream os, final long uncompressedSize, final int blockSize)
- throws IOException {
- this(os, uncompressedSize, createParameterBuilder(blockSize).build());
- }
-
- /**
- * Constructor providing full control over the underlying LZ77 compressor.
- *
- * @param os the outputstream to write compressed data to
- * @param uncompressedSize the uncompressed size of data
- * @param params the parameters to use by the compressor - note
- * that the format itself imposes some limits like a maximum match
- * length of 64 bytes
- * @throws IOException if writing of the size fails
- */
- public SnappyCompressorOutputStream(final OutputStream os, final long uncompressedSize, Parameters params)
- throws IOException {
- this.os = os;
- consumer = new ByteUtils.OutputStreamByteConsumer(os);
- compressor = new LZ77Compressor(params, new LZ77Compressor.Callback() {
- @Override
- public void accept(LZ77Compressor.Block block) throws IOException {
- switch (block.getType()) {
- case LITERAL:
- writeLiteralBlock((LZ77Compressor.LiteralBlock) block);
- break;
- case BACK_REFERENCE:
- writeBackReference((LZ77Compressor.BackReference) block);
- break;
- case EOD:
- break;
- }
- }
- });
- writeUncompressedSize(uncompressedSize);
- }
-
- @Override
- public void write(int b) throws IOException {
- oneByte[0] = (byte) (b & 0xff);
- write(oneByte);
- }
-
- @Override
- public void write(byte[] data, int off, int len) throws IOException {
- compressor.compress(data, off, len);
- }
-
- @Override
- public void close() throws IOException {
- finish();
- os.close();
- }
-
- /**
- * Compresses all remaining data and writes it to the stream,
- * doesn't close the underlying stream.
- * @throws IOException if an error occurs
- */
- public void finish() throws IOException {
- if (!finished) {
- compressor.finish();
- finished = true;
- }
- }
-
- private void writeUncompressedSize(long uncompressedSize) throws IOException {
- boolean more = false;
- do {
- int currentByte = (int) (uncompressedSize & 0x7F);
- more = uncompressedSize > currentByte;
- if (more) {
- currentByte |= 0x80;
- }
- os.write(currentByte);
- uncompressedSize >>= 7;
- } while (more);
- }
-
- // literal length is stored as (len - 1) either inside the tag
- // (six bits minus four flags) or in 1 to 4 bytes after the tag
- private static final int MAX_LITERAL_SIZE_WITHOUT_SIZE_BYTES = 60;
- private static final int MAX_LITERAL_SIZE_WITH_ONE_SIZE_BYTE = 1 << 8;
- private static final int MAX_LITERAL_SIZE_WITH_TWO_SIZE_BYTES = 1 << 16;
- private static final int MAX_LITERAL_SIZE_WITH_THREE_SIZE_BYTES = 1 << 24;
-
- private static final int ONE_SIZE_BYTE_MARKER = 60 << 2;
- private static final int TWO_SIZE_BYTE_MARKER = 61 << 2;
- private static final int THREE_SIZE_BYTE_MARKER = 62 << 2;
- private static final int FOUR_SIZE_BYTE_MARKER = 63 << 2;
-
- private void writeLiteralBlock(LZ77Compressor.LiteralBlock block) throws IOException {
- int len = block.getLength();
- if (len <= MAX_LITERAL_SIZE_WITHOUT_SIZE_BYTES) {
- writeLiteralBlockNoSizeBytes(block, len);
- } else if (len <= MAX_LITERAL_SIZE_WITH_ONE_SIZE_BYTE) {
- writeLiteralBlockOneSizeByte(block, len);
- } else if (len <= MAX_LITERAL_SIZE_WITH_TWO_SIZE_BYTES) {
- writeLiteralBlockTwoSizeBytes(block, len);
- } else if (len <= MAX_LITERAL_SIZE_WITH_THREE_SIZE_BYTES) {
- writeLiteralBlockThreeSizeBytes(block, len);
- } else {
- writeLiteralBlockFourSizeBytes(block, len);
- }
- }
-
- private void writeLiteralBlockNoSizeBytes(LZ77Compressor.LiteralBlock block, int len) throws IOException {
- writeLiteralBlockWithSize(len - 1 << 2, 0, len, block);
- }
-
- private void writeLiteralBlockOneSizeByte(LZ77Compressor.LiteralBlock block, int len) throws IOException {
- writeLiteralBlockWithSize(ONE_SIZE_BYTE_MARKER, 1, len, block);
- }
-
- private void writeLiteralBlockTwoSizeBytes(LZ77Compressor.LiteralBlock block, int len) throws IOException {
- writeLiteralBlockWithSize(TWO_SIZE_BYTE_MARKER, 2, len, block);
- }
-
- private void writeLiteralBlockThreeSizeBytes(LZ77Compressor.LiteralBlock block, int len) throws IOException {
- writeLiteralBlockWithSize(THREE_SIZE_BYTE_MARKER, 3, len, block);
- }
-
- private void writeLiteralBlockFourSizeBytes(LZ77Compressor.LiteralBlock block, int len) throws IOException {
- writeLiteralBlockWithSize(FOUR_SIZE_BYTE_MARKER, 4, len, block);
- }
-
- private void writeLiteralBlockWithSize(int tagByte, int sizeBytes, int len, LZ77Compressor.LiteralBlock block)
- throws IOException {
- os.write(tagByte);
- writeLittleEndian(sizeBytes, len - 1);
- os.write(block.getData(), block.getOffset(), len);
- }
-
- private void writeLittleEndian(final int numBytes, int num) throws IOException {
- ByteUtils.toLittleEndian(consumer, num, numBytes);
- }
-
- // Back-references ("copies") have their offset/size information
- // in two, three or five bytes.
- private static final int MIN_MATCH_LENGTH_WITH_ONE_OFFSET_BYTE = 4;
- private static final int MAX_MATCH_LENGTH_WITH_ONE_OFFSET_BYTE = 11;
- private static final int MAX_OFFSET_WITH_ONE_OFFSET_BYTE = (1 << 11) - 1;
- private static final int MAX_OFFSET_WITH_TWO_OFFSET_BYTES = (1 << 16) - 1;
-
- private static final int ONE_BYTE_COPY_TAG = 1;
- private static final int TWO_BYTE_COPY_TAG = 2;
- private static final int FOUR_BYTE_COPY_TAG = 3;
-
- private void writeBackReference(LZ77Compressor.BackReference block) throws IOException {
- final int len = block.getLength();
- final int offset = block.getOffset();
- if (len >= MIN_MATCH_LENGTH_WITH_ONE_OFFSET_BYTE && len <= MAX_MATCH_LENGTH_WITH_ONE_OFFSET_BYTE
- && offset <= MAX_OFFSET_WITH_ONE_OFFSET_BYTE) {
- writeBackReferenceWithOneOffsetByte(len, offset);
- } else if (offset < MAX_OFFSET_WITH_TWO_OFFSET_BYTES) {
- writeBackReferenceWithTwoOffsetBytes(len, offset);
- } else {
- writeBackReferenceWithFourOffsetBytes(len, offset);
- }
- }
-
- private void writeBackReferenceWithOneOffsetByte(int len, int offset) throws IOException {
- os.write(ONE_BYTE_COPY_TAG | ((len - 4) << 2) | ((offset & 0x700) >> 3));
- os.write(offset & 0xff);
- }
-
- private void writeBackReferenceWithTwoOffsetBytes(int len, int offset) throws IOException {
- writeBackReferenceWithLittleEndianOffset(TWO_BYTE_COPY_TAG, 2, len, offset);
- }
-
- private void writeBackReferenceWithFourOffsetBytes(int len, int offset) throws IOException {
- writeBackReferenceWithLittleEndianOffset(FOUR_BYTE_COPY_TAG, 4, len, offset);
- }
-
- private void writeBackReferenceWithLittleEndianOffset(int tag, int offsetBytes, int len, int offset)
- throws IOException {
- os.write(tag | ((len - 1) << 2));
- writeLittleEndian(offsetBytes, offset);
- }
-
- // technically the format could use shorter matches but with a
- // length of three the offset would be encoded as at least two
- // bytes in addition to the tag, so it would yield no compression at all
- private static final int MIN_MATCH_LENGTH = 4;
- // Snappy stores the match length in six bits of the tag
- private static final int MAX_MATCH_LENGTH = 64;
-
- /**
- * Returns a builder correctly configured for the Snappy algorithm using the given block size.
- * @param blockSize the block size.
- * @return a builder correctly configured for the Snappy algorithm using the given block size
- */
- public static Parameters.Builder createParameterBuilder(int blockSize) {
- // the max offset and max literal length defined by the format
- // are 2^32 - 1 and 2^32 respectively - with blockSize being
- // an integer we will never exceed that
- return Parameters.builder(blockSize)
- .withMinBackReferenceLength(MIN_MATCH_LENGTH)
- .withMaxBackReferenceLength(MAX_MATCH_LENGTH)
- .withMaxOffset(blockSize)
- .withMaxLiteralLength(blockSize);
- }
-}
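The one-offset-byte copy above packs its fields as tag type 01, (len - 4) in bits 2-4, and offset bits 8-10 in bits 5-7, with the low offset byte trailing. A small sketch reproducing that packing for inspection (demo class is illustrative):

```java
public class SnappyCopyTagDemo {
    // Mirrors writeBackReferenceWithOneOffsetByte: type 01 copy,
    // length 4..11, offset up to 2047 (11 bits).
    static int[] encodeOneByteCopy(int len, int offset) {
        int tag = 1                        // ONE_BYTE_COPY_TAG (element type 01)
                | ((len - 4) << 2)         // 3 bits of length in tag bits 2-4
                | ((offset & 0x700) >> 3); // offset bits 8-10 -> tag bits 5-7
        return new int[] { tag, offset & 0xFF };
    }

    public static void main(String[] args) {
        int[] bytes = encodeOneByteCopy(7, 0x1A5);
        // len=7 -> (7-4)<<2 = 0x0C; offset 0x1A5 -> high bits 0x20, low byte 0xA5
        System.out.printf("tag=0x%02X offsetByte=0x%02X%n", bytes[0], bytes[1]); // 0x2D 0xA5
    }
}
```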
diff --git a/src/org/apache/commons/compress/compressors/snappy/package.html b/src/org/apache/commons/compress/compressors/snappy/package.html
deleted file mode 100644
index efef0718013..00000000000
--- a/src/org/apache/commons/compress/compressors/snappy/package.html
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-
-
- <p>Provides stream classes for the
- Snappy
- algorithm.
-
-
- <p>The raw Snappy format which only contains the compressed data
- is supported by the SnappyCompressor*putStream
- classes while the so called "framing format" is implemented
- by FramedSnappyCompressor*putStream. Note there
- have been different versions of the framing format specification,
- the implementation in Commons Compress is based on the
- specification "Last revised: 2013-10-25".
-
-
- <p>Only the "framing format" can be auto-detected. This means you
- have to specify the format explicitly if you want to read a
- "raw" Snappy stream
- via CompressorStreamFactory.
-
-
diff --git a/src/org/apache/commons/compress/compressors/xz/XZCompressorInputStream.java b/src/org/apache/commons/compress/compressors/xz/XZCompressorInputStream.java
deleted file mode 100644
index 20d67a19ab9..00000000000
--- a/src/org/apache/commons/compress/compressors/xz/XZCompressorInputStream.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.xz;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.tukaani.xz.XZ;
-import org.tukaani.xz.SingleXZInputStream;
-import org.tukaani.xz.XZInputStream;
-
-import org.apache.commons.compress.MemoryLimitException;
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * XZ decompressor.
- * @since 1.4
- */
-public class XZCompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
-
- private final CountingInputStream countingStream;
- private final InputStream in;
-
- /**
- * Checks if the signature matches what is expected for a .xz file.
- *
- * @param signature the bytes to check
- * @param length the number of bytes to check
- * @return true if signature matches the .xz magic bytes, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
- if (length < XZ.HEADER_MAGIC.length) {
- return false;
- }
-
- for (int i = 0; i < XZ.HEADER_MAGIC.length; ++i) {
- if (signature[i] != XZ.HEADER_MAGIC[i]) {
- return false;
- }
- }
-
- return true;
- }
-
- /**
- * Creates a new input stream that decompresses XZ-compressed data
- * from the specified input stream. This doesn't support
- * concatenated .xz files.
- *
- * @param inputStream where to read the compressed data
- *
- * @throws IOException if the input is not in the .xz format,
- * the input is corrupt or truncated, the .xz
- * headers specify options that are not supported
- * by this implementation, or the underlying
- * inputStream throws an exception
- */
- public XZCompressorInputStream(final InputStream inputStream)
- throws IOException {
- this(inputStream, false);
- }
-
- /**
- * Creates a new input stream that decompresses XZ-compressed data
- * from the specified input stream.
- *
- * @param inputStream where to read the compressed data
- * @param decompressConcatenated
- * if true, decompress until the end of the
- * input; if false, stop after the first .xz
- * stream and leave the input position to point
- * to the next byte after the .xz stream
- *
- * @throws IOException if the input is not in the .xz format,
- * the input is corrupt or truncated, the .xz
- * headers specify options that are not supported
- * by this implementation, or the underlying
- * inputStream throws an exception
- */
- public XZCompressorInputStream(final InputStream inputStream,
- final boolean decompressConcatenated)
- throws IOException {
- this(inputStream, decompressConcatenated, -1);
- }
-
- /**
- * Creates a new input stream that decompresses XZ-compressed data
- * from the specified input stream.
- *
- * @param inputStream where to read the compressed data
- * @param decompressConcatenated
- * if true, decompress until the end of the
- * input; if false, stop after the first .xz
- * stream and leave the input position to point
- * to the next byte after the .xz stream
- * @param memoryLimitInKb memory limit used when reading blocks. If
- * the estimated memory limit is exceeded on {@link #read()},
- * a {@link MemoryLimitException} is thrown.
- *
- * @throws IOException if the input is not in the .xz format,
- * the input is corrupt or truncated, the .xz
- * headers specify options that are not supported
- * by this implementation,
- * or the underlying inputStream throws an exception
- *
- * @since 1.14
- */
- public XZCompressorInputStream(InputStream inputStream,
- boolean decompressConcatenated, final int memoryLimitInKb)
- throws IOException {
- countingStream = new CountingInputStream(inputStream);
- if (decompressConcatenated) {
- in = new XZInputStream(countingStream, memoryLimitInKb);
- } else {
- in = new SingleXZInputStream(countingStream, memoryLimitInKb);
- }
- }
-
- @Override
- public int read() throws IOException {
- try {
- final int ret = in.read();
- count(ret == -1 ? -1 : 1);
- return ret;
- } catch (org.tukaani.xz.MemoryLimitException e) {
- throw new MemoryLimitException(e.getMemoryNeeded(), e.getMemoryLimit(), e);
- }
- }
-
- @Override
- public int read(final byte[] buf, final int off, final int len) throws IOException {
- try {
- final int ret = in.read(buf, off, len);
- count(ret);
- return ret;
- } catch (org.tukaani.xz.MemoryLimitException e) {
- //convert to commons-compress MemoryLimitException
- throw new MemoryLimitException(e.getMemoryNeeded(), e.getMemoryLimit(), e);
- }
- }
-
- @Override
- public long skip(final long n) throws IOException {
- try {
- return IOUtils.skip(in, n);
- } catch (org.tukaani.xz.MemoryLimitException e) {
- //convert to commons-compress MemoryLimitException
- throw new MemoryLimitException(e.getMemoryNeeded(), e.getMemoryLimit(), e);
- }
- }
-
- @Override
- public int available() throws IOException {
- return in.available();
- }
-
- @Override
- public void close() throws IOException {
- in.close();
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return countingStream.getBytesRead();
- }
-}
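Although the class moves out of the tree, it remains available from the Ivy-resolved commons-compress jar. A minimal read sketch with a memory limit, assuming a hypothetical data.osm.xz input file:

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.xz.XZCompressorInputStream;

public class XZReadDemo {
    public static void main(String[] args) throws IOException {
        // decompressConcatenated=true, ~64 MiB memory limit for block decoding
        try (InputStream fin = Files.newInputStream(Paths.get("data.osm.xz"));
             XZCompressorInputStream xz = new XZCompressorInputStream(fin, true, 65536)) {
            byte[] buf = new byte[8192];
            int n;
            long total = 0;
            while ((n = xz.read(buf)) != -1) {
                total += n;
            }
            System.out.println("uncompressed bytes: " + total);
        }
    }
}
```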
diff --git a/src/org/apache/commons/compress/compressors/xz/XZCompressorOutputStream.java b/src/org/apache/commons/compress/compressors/xz/XZCompressorOutputStream.java
deleted file mode 100644
index 6e9b70e05fa..00000000000
--- a/src/org/apache/commons/compress/compressors/xz/XZCompressorOutputStream.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.xz;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import org.tukaani.xz.LZMA2Options;
-import org.tukaani.xz.XZOutputStream;
-
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-
-/**
- * XZ compressor.
- * @since 1.4
- */
-public class XZCompressorOutputStream extends CompressorOutputStream {
- private final XZOutputStream out;
-
- /**
- * Creates a new XZ compressor using the default LZMA2 options.
- * This is equivalent to XZCompressorOutputStream(outputStream, 6).
- * @param outputStream the stream to wrap
- * @throws IOException on error
- */
- public XZCompressorOutputStream(final OutputStream outputStream)
- throws IOException {
- out = new XZOutputStream(outputStream, new LZMA2Options());
- }
-
- /**
- * Creates a new XZ compressor using the specified LZMA2 preset level.
- *
- * The presets 0-3 are fast presets with medium compression.
- * The presets 4-6 are fairly slow presets with high compression.
- * The default preset is 6.
- *
- * The presets 7-9 are like the preset 6 but use bigger dictionaries
- * and have higher compressor and decompressor memory requirements.
- * Unless the uncompressed size of the file exceeds 8 MiB,
- * 16 MiB, or 32 MiB, it is a waste of memory to use the
- * presets 7, 8, or 9, respectively.
- * @param outputStream the stream to wrap
- * @param preset the preset
- * @throws IOException on error
- */
- public XZCompressorOutputStream(final OutputStream outputStream, final int preset)
- throws IOException {
- out = new XZOutputStream(outputStream, new LZMA2Options(preset));
- }
-
- @Override
- public void write(final int b) throws IOException {
- out.write(b);
- }
-
- @Override
- public void write(final byte[] buf, final int off, final int len) throws IOException {
- out.write(buf, off, len);
- }
-
- /**
- * Flushes the encoder and calls outputStream.flush().
- * All buffered pending data will then be decompressible from
- * the output stream. Calling this function very often may increase
- * the compressed file size a lot.
- */
- @Override
- public void flush() throws IOException {
- out.flush();
- }
-
- /**
- * Finishes compression without closing the underlying stream.
- * No more data can be written to this stream after finishing.
- * @throws IOException on error
- */
- public void finish() throws IOException {
- out.finish();
- }
-
- @Override
- public void close() throws IOException {
- out.close();
- }
-}
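Correspondingly, a minimal write sketch using the preset constructor (file name illustrative; close() finishes the XZ stream and closes the target):

```java
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.xz.XZCompressorOutputStream;

public class XZWriteDemo {
    public static void main(String[] args) throws IOException {
        try (OutputStream fout = Files.newOutputStream(Paths.get("data.osm.xz"));
             XZCompressorOutputStream xz = new XZCompressorOutputStream(fout, 6)) {
            // preset 6 is the library default; higher presets need more memory
            xz.write("hello xz".getBytes(StandardCharsets.UTF_8));
        }
    }
}
```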
diff --git a/src/org/apache/commons/compress/compressors/xz/XZUtils.java b/src/org/apache/commons/compress/compressors/xz/XZUtils.java
deleted file mode 100644
index be4625e5dc2..00000000000
--- a/src/org/apache/commons/compress/compressors/xz/XZUtils.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.xz;
-
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.commons.compress.compressors.FileNameUtil;
-
-/**
- * Utility code for the xz compression format.
- * @ThreadSafe
- * @since 1.4
- */
-public class XZUtils {
-
- private static final FileNameUtil fileNameUtil;
-
- /**
- * XZ Header Magic Bytes begin an XZ file.
- *
- *
- * <p>This is a copy of {@code org.tukaani.xz.XZ.HEADER_MAGIC} in
- * XZ for Java version 1.5.
- */
- private static final byte[] HEADER_MAGIC = {
- (byte) 0xFD, '7', 'z', 'X', 'Z', '\0'
- };
-
- enum CachedAvailability {
- DONT_CACHE, CACHED_AVAILABLE, CACHED_UNAVAILABLE
- }
-
- private static volatile CachedAvailability cachedXZAvailability;
-
- static {
- final Map uncompressSuffix = new HashMap<>();
- uncompressSuffix.put(".txz", ".tar");
- uncompressSuffix.put(".xz", "");
- uncompressSuffix.put("-xz", "");
- fileNameUtil = new FileNameUtil(uncompressSuffix, ".xz");
- cachedXZAvailability = CachedAvailability.DONT_CACHE;
- try {
- Class.forName("org.osgi.framework.BundleEvent");
- } catch (final Exception ex) {
- setCacheXZAvailablity(true);
- }
- }
-
- /** Private constructor to prevent instantiation of this utility class. */
- private XZUtils() {
- }
-
- /**
- * Checks if the signature matches what is expected for a .xz file.
- *
- *
- * <p>This is more or less a copy of the version found in {@link
- * XZCompressorInputStream} but doesn't depend on the presence of
- * XZ for Java.
- *
- * @param signature the bytes to check
- * @param length the number of bytes to check
- * @return true if signature matches the .xz magic bytes, false otherwise
- * @since 1.9
- */
- public static boolean matches(final byte[] signature, final int length) {
- if (length < HEADER_MAGIC.length) {
- return false;
- }
-
- for (int i = 0; i < HEADER_MAGIC.length; ++i) {
- if (signature[i] != HEADER_MAGIC[i]) {
- return false;
- }
- }
-
- return true;
- }
-
- /**
- * Are the classes required to support XZ compression available?
- * @since 1.5
- * @return true if the classes required to support XZ compression are available
- */
- public static boolean isXZCompressionAvailable() {
- final CachedAvailability cachedResult = cachedXZAvailability;
- if (cachedResult != CachedAvailability.DONT_CACHE) {
- return cachedResult == CachedAvailability.CACHED_AVAILABLE;
- }
- return internalIsXZCompressionAvailable();
- }
-
- private static boolean internalIsXZCompressionAvailable() {
- try {
- XZCompressorInputStream.matches(null, 0);
- return true;
- } catch (final NoClassDefFoundError error) {
- return false;
- }
- }
-
- /**
- * Detects common xz suffixes in the given filename.
- *
- * @param filename name of a file
- * @return {@code true} if the filename has a common xz suffix,
- * {@code false} otherwise
- */
- public static boolean isCompressedFilename(final String filename) {
- return fileNameUtil.isCompressedFilename(filename);
- }
-
- /**
- * Maps the given name of a xz-compressed file to the name that the
- * file should have after uncompression. Commonly used file type specific
- * suffixes like ".txz" are automatically detected and
- * correctly mapped. For example the name "package.txz" is mapped to
- * "package.tar". And any filenames with the generic ".xz" suffix
- * (or any other generic xz suffix) is mapped to a name without that
- * suffix. If no xz suffix is detected, then the filename is returned
- * unmapped.
- *
- * @param filename name of a file
- * @return name of the corresponding uncompressed file
- */
- public static String getUncompressedFilename(final String filename) {
- return fileNameUtil.getUncompressedFilename(filename);
- }
-
- /**
- * Maps the given filename to the name that the file should have after
- * compression with xz. Common file types with custom suffixes for
- * compressed versions are automatically detected and correctly mapped.
- * For example the name "package.tar" is mapped to "package.txz". If no
- * custom mapping is applicable, then the default ".xz" suffix is appended
- * to the filename.
- *
- * @param filename name of a file
- * @return name of the corresponding compressed file
- */
- public static String getCompressedFilename(final String filename) {
- return fileNameUtil.getCompressedFilename(filename);
- }
-
- /**
- * Whether to cache the result of the XZ for Java check.
- *
- *
- * <p>This defaults to {@code false} in an OSGi environment and {@code true} otherwise.
- * @param doCache whether to cache the result
- * @since 1.9
- */
- public static void setCacheXZAvailablity(final boolean doCache) {
- if (!doCache) {
- cachedXZAvailability = CachedAvailability.DONT_CACHE;
- } else if (cachedXZAvailability == CachedAvailability.DONT_CACHE) {
- final boolean hasXz = internalIsXZCompressionAvailable();
- cachedXZAvailability = hasXz ? CachedAvailability.CACHED_AVAILABLE // NOSONAR
- : CachedAvailability.CACHED_UNAVAILABLE;
- }
- }
-
- // only exists to support unit tests
- static CachedAvailability getCachedXZAvailability() {
- return cachedXZAvailability;
- }
-}
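A short usage sketch of the filename mapping helpers above (expected outputs noted in comments):

```java
import org.apache.commons.compress.compressors.xz.XZUtils;

public class XZNamesDemo {
    public static void main(String[] args) {
        System.out.println(XZUtils.isCompressedFilename("map.osm.xz"));     // true
        System.out.println(XZUtils.getUncompressedFilename("backup.txz"));  // backup.tar
        System.out.println(XZUtils.getCompressedFilename("backup.tar"));    // backup.txz
        // true only when the XZ for Java classes are on the classpath
        System.out.println(XZUtils.isXZCompressionAvailable());
    }
}
```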
diff --git a/src/org/apache/commons/compress/compressors/xz/package.html b/src/org/apache/commons/compress/compressors/xz/package.html
deleted file mode 100644
index 48eca2518de..00000000000
--- a/src/org/apache/commons/compress/compressors/xz/package.html
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-
-
- <p>Provides stream classes for compressing and decompressing
- streams using the XZ algorithm.
-
-
- <p>The classes in this package are wrappers around {@link
- org.tukaani.xz.XZInputStream org.tukaani.xz.XZInputStream} and
- {@link org.tukaani.xz.XZOutputStream
- org.tukaani.xz.XZOutputStream} provided by the public
- domain XZ for Java
- library.
-
-
diff --git a/src/org/apache/commons/compress/compressors/z/ZCompressorInputStream.java b/src/org/apache/commons/compress/compressors/z/ZCompressorInputStream.java
deleted file mode 100644
index b7ce16f3313..00000000000
--- a/src/org/apache/commons/compress/compressors/z/ZCompressorInputStream.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.z;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteOrder;
-
-import org.apache.commons.compress.compressors.lzw.LZWInputStream;
-
-/**
- * Input stream that decompresses .Z files.
- * @NotThreadSafe
- * @since 1.7
- */
-public class ZCompressorInputStream extends LZWInputStream {
- private static final int MAGIC_1 = 0x1f;
- private static final int MAGIC_2 = 0x9d;
- private static final int BLOCK_MODE_MASK = 0x80;
- private static final int MAX_CODE_SIZE_MASK = 0x1f;
- private final boolean blockMode;
- private final int maxCodeSize;
- private long totalCodesRead = 0;
-
- public ZCompressorInputStream(final InputStream inputStream, final int memoryLimitInKb)
- throws IOException {
- super(inputStream, ByteOrder.LITTLE_ENDIAN);
- final int firstByte = (int) in.readBits(8);
- final int secondByte = (int) in.readBits(8);
- final int thirdByte = (int) in.readBits(8);
- if (firstByte != MAGIC_1 || secondByte != MAGIC_2 || thirdByte < 0) {
- throw new IOException("Input is not in .Z format");
- }
- blockMode = (thirdByte & BLOCK_MODE_MASK) != 0;
- maxCodeSize = thirdByte & MAX_CODE_SIZE_MASK;
- if (blockMode) {
- setClearCode(DEFAULT_CODE_SIZE);
- }
- initializeTables(maxCodeSize, memoryLimitInKb);
- clearEntries();
- }
-
- public ZCompressorInputStream(final InputStream inputStream) throws IOException {
- this(inputStream, -1);
- }
-
- private void clearEntries() {
- setTableSize((1 << 8) + (blockMode ? 1 : 0));
- }
-
- /**
- * {@inheritDoc}
- *
- * <p>This method is only protected for technical reasons
- * and is not part of Commons Compress' published API. It may
- * change or disappear without warning.
- */
- @Override
- protected int readNextCode() throws IOException {
- final int code = super.readNextCode();
- if (code >= 0) {
- ++totalCodesRead;
- }
- return code;
- }
-
- private void reAlignReading() throws IOException {
- // "compress" works in multiples of 8 symbols, each codeBits bits long.
- // When codeBits changes, the remaining unused symbols in the current
- // group of 8 are still written out, in the old codeSize,
- // as garbage values (usually zeroes) that need to be skipped.
- long codeReadsToThrowAway = 8 - (totalCodesRead % 8);
- if (codeReadsToThrowAway == 8) {
- codeReadsToThrowAway = 0;
- }
- for (long i = 0; i < codeReadsToThrowAway; i++) {
- readNextCode();
- }
- in.clearBitCache();
- }
-
- /**
- * {@inheritDoc}
- *
- * <p>This method is only protected for technical reasons
- * and is not part of Commons Compress' published API. It may
- * change or disappear without warning.
- */
- @Override
- protected int addEntry(final int previousCode, final byte character) throws IOException {
- final int maxTableSize = 1 << getCodeSize();
- final int r = addEntry(previousCode, character, maxTableSize);
- if (getTableSize() == maxTableSize && getCodeSize() < maxCodeSize) {
- reAlignReading();
- incrementCodeSize();
- }
- return r;
- }
-
- /**
- * {@inheritDoc}
- *
- * <p>This method is only protected for technical reasons
- * and is not part of Commons Compress' published API. It may
- * change or disappear without warning.
- */
- @Override
- protected int decompressNextSymbol() throws IOException {
- //
- // table entry table entry
- // _____________ _____
- // table entry / \ / \
- // ____________/ \ \
- // / / \ / \ \
- // +---+---+---+---+---+---+---+---+---+---+
- // | . | . | . | . | . | . | . | . | . | . |
- // +---+---+---+---+---+---+---+---+---+---+
- // |<--------->|<------------->|<----->|<->|
- // symbol symbol symbol symbol
- //
- final int code = readNextCode();
- if (code < 0) {
- return -1;
- } else if (blockMode && code == getClearCode()) {
- clearEntries();
- reAlignReading();
- resetCodeSize();
- resetPreviousCode();
- return 0;
- } else {
- boolean addedUnfinishedEntry = false;
- if (code == getTableSize()) {
- addRepeatOfPreviousCode();
- addedUnfinishedEntry = true;
- } else if (code > getTableSize()) {
- throw new IOException(String.format("Invalid %d bit code 0x%x", getCodeSize(), code));
- }
- return expandCodeToOutputStack(code, addedUnfinishedEntry);
- }
- }
-
- /**
- * Checks if the signature matches what is expected for a Unix compress file.
- *
- * @param signature
- * the bytes to check
- * @param length
- * the number of bytes to check
- * @return true, if this stream is a Unix compress compressed
- * stream, false otherwise
- *
- * @since 1.9
- */
- public static boolean matches(final byte[] signature, final int length) {
- return length > 3 && signature[0] == MAGIC_1 && signature[1] == (byte) MAGIC_2;
- }
-
-}
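A hedged sketch of the magic-byte check and a streaming read of a hypothetical legacy.Z file (note matches() requires at least four bytes):

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.compress.compressors.z.ZCompressorInputStream;

public class ZReadDemo {
    public static void main(String[] args) throws IOException {
        // 0x1F 0x9D magic, third byte carries block mode + max code size
        byte[] head = { 0x1f, (byte) 0x9d, (byte) 0x90, 0 };
        System.out.println(ZCompressorInputStream.matches(head, head.length)); // true

        try (InputStream fin = Files.newInputStream(Paths.get("legacy.Z"));
             ZCompressorInputStream z = new ZCompressorInputStream(fin, 8192)) {
            byte[] buf = new byte[4096];
            while (z.read(buf) != -1) {
                // consume decompressed "compress" output
            }
        }
    }
}
```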
diff --git a/src/org/apache/commons/compress/compressors/z/package.html b/src/org/apache/commons/compress/compressors/z/package.html
deleted file mode 100644
index ca9924b786d..00000000000
--- a/src/org/apache/commons/compress/compressors/z/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
- <p>Provides stream classes for decompressing
- streams using the "compress" algorithm used to write .Z files.
-
-
diff --git a/src/org/apache/commons/compress/compressors/zstandard/ZstdCompressorInputStream.java b/src/org/apache/commons/compress/compressors/zstandard/ZstdCompressorInputStream.java
deleted file mode 100644
index 7a47f10006d..00000000000
--- a/src/org/apache/commons/compress/compressors/zstandard/ZstdCompressorInputStream.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.compress.compressors.zstandard;
-
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import com.github.luben.zstd.ZstdInputStream;
-import org.apache.commons.compress.compressors.CompressorInputStream;
-import org.apache.commons.compress.utils.CountingInputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.compress.utils.InputStreamStatistics;
-
-/**
- * {@link CompressorInputStream} implementation to decode a Zstandard encoded stream.
- * The implementation relies on the Zstandard JNI library.
- *
- * @since 1.16
- */
-public class ZstdCompressorInputStream extends CompressorInputStream
- implements InputStreamStatistics {
-
- private final CountingInputStream countingStream;
- private final ZstdInputStream decIS;
-
- public ZstdCompressorInputStream(final InputStream in) throws IOException {
- this.decIS = new ZstdInputStream(countingStream = new CountingInputStream(in));
- }
-
- @Override
- public int available() throws IOException {
- return decIS.available();
- }
-
- @Override
- public void close() throws IOException {
- decIS.close();
- }
-
- @Override
- public int read(final byte[] b) throws IOException {
- return decIS.read(b);
- }
-
- @Override
- public long skip(final long n) throws IOException {
- return IOUtils.skip(decIS, n);
- }
-
- @Override
- public void mark(final int readlimit) {
- decIS.mark(readlimit);
- }
-
- @Override
- public boolean markSupported() {
- return decIS.markSupported();
- }
-
- @Override
- public int read() throws IOException {
- final int ret = decIS.read();
- count(ret == -1 ? 0 : 1);
- return ret;
- }
-
- @Override
- public int read(final byte[] buf, final int off, final int len) throws IOException {
- final int ret = decIS.read(buf, off, len);
- count(ret);
- return ret;
- }
-
- @Override
- public String toString() {
- return decIS.toString();
- }
-
- @Override
- public void reset() throws IOException {
- decIS.reset();
- }
-
- /**
- * @since 1.17
- */
- @Override
- public long getCompressedCount() {
- return countingStream.getBytesRead();
- }
-}
diff --git a/src/org/apache/commons/compress/compressors/zstandard/ZstdCompressorOutputStream.java b/src/org/apache/commons/compress/compressors/zstandard/ZstdCompressorOutputStream.java
deleted file mode 100644
index b54dfa73d75..00000000000
--- a/src/org/apache/commons/compress/compressors/zstandard/ZstdCompressorOutputStream.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.compress.compressors.zstandard;
-
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import com.github.luben.zstd.ZstdOutputStream;
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-
-/**
- * {@link CompressorOutputStream} implementation to create a Zstandard encoded stream.
- * The implementation relies on the Zstandard JNI library.
- *
- * @since 1.16
- */
-public class ZstdCompressorOutputStream extends CompressorOutputStream {
-
- private final ZstdOutputStream encOS;
-
- public ZstdCompressorOutputStream(final OutputStream out) throws IOException {
- this.encOS = new ZstdOutputStream(out);
- }
-
- @Override
- public void close() throws IOException {
- encOS.close();
- }
-
- @Override
- public void write(final int b) throws IOException {
- encOS.write(b);
- }
-
- @Override
- public void write(final byte[] buf, final int off, final int len) throws IOException {
- encOS.write(buf, off, len);
- }
-
- @Override
- public String toString() {
- return encOS.toString();
- }
-
- @Override
- public void flush() throws IOException {
- encOS.flush();
- }
-}
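A round-trip sketch over the two zstd wrappers above; this assumes the zstd-jni library is on the classpath, as the wrappers require:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.compress.compressors.zstandard.ZstdCompressorInputStream;
import org.apache.commons.compress.compressors.zstandard.ZstdCompressorOutputStream;

public class ZstdRoundTripDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ZstdCompressorOutputStream zout = new ZstdCompressorOutputStream(bos)) {
            zout.write("zstd via commons-compress".getBytes(StandardCharsets.UTF_8));
        }
        try (ZstdCompressorInputStream zin =
                 new ZstdCompressorInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            byte[] buf = new byte[256];
            int n = zin.read(buf); // payload is tiny, one read suffices here
            System.out.println(new String(buf, 0, n, StandardCharsets.UTF_8));
        }
    }
}
```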
diff --git a/src/org/apache/commons/compress/compressors/zstandard/ZstdUtils.java b/src/org/apache/commons/compress/compressors/zstandard/ZstdUtils.java
deleted file mode 100644
index 8b2f8d3d717..00000000000
--- a/src/org/apache/commons/compress/compressors/zstandard/ZstdUtils.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.compressors.zstandard;
-
-/**
- * Utility code for the Zstandard compression format.
- * @ThreadSafe
- * @since 1.16
- */
-public class ZstdUtils {
-
- enum CachedAvailability {
- DONT_CACHE, CACHED_AVAILABLE, CACHED_UNAVAILABLE
- }
-
- /**
- * Zstandard Frame Magic Bytes.
- */
- private static final byte[] ZSTANDARD_FRAME_MAGIC = {
- (byte) 0x28, (byte) 0xB5, (byte) 0x2F, (byte) 0xFD
- };
-
- /**
- * Skippable Frame Magic Bytes - the three common bytes.
- */
- private static final byte[] SKIPPABLE_FRAME_MAGIC = {
- (byte) 0x2A, (byte) 0x4D, (byte) 0x18
- };
-
- private static volatile CachedAvailability cachedZstdAvailability;
-
- static {
- cachedZstdAvailability = CachedAvailability.DONT_CACHE;
- try {
- Class.forName("org.osgi.framework.BundleEvent");
- } catch (final Exception ex) { // NOSONAR
- setCacheZstdAvailablity(true);
- }
- }
-
- /** Private constructor to prevent instantiation of this utility class. */
- private ZstdUtils() {
- }
-
- /**
- * Are the classes required to support Zstandard compression available?
- * @return true if the classes required to support Zstandard compression are available
- */
- public static boolean isZstdCompressionAvailable() {
- final CachedAvailability cachedResult = cachedZstdAvailability;
- if (cachedResult != CachedAvailability.DONT_CACHE) {
- return cachedResult == CachedAvailability.CACHED_AVAILABLE;
- }
- return internalIsZstdCompressionAvailable();
- }
-
- private static boolean internalIsZstdCompressionAvailable() {
- try {
- Class.forName("com.github.luben.zstd.ZstdInputStream");
- return true;
- } catch (NoClassDefFoundError | Exception error) { // NOSONAR
- return false;
- }
- }
-
- /**
- * Whether to cache the result of the Zstandard for Java check.
- *
- *
- * <p>This defaults to {@code false} in an OSGi environment and {@code true} otherwise.
- * @param doCache whether to cache the result
- */
- public static void setCacheZstdAvailablity(final boolean doCache) {
- if (!doCache) {
- cachedZstdAvailability = CachedAvailability.DONT_CACHE;
- } else if (cachedZstdAvailability == CachedAvailability.DONT_CACHE) {
- final boolean hasZstd = internalIsZstdCompressionAvailable();
- cachedZstdAvailability = hasZstd ? CachedAvailability.CACHED_AVAILABLE
- : CachedAvailability.CACHED_UNAVAILABLE;
- }
- }
-
- /**
- * Checks if the signature matches what is expected for a Zstandard file.
- *
- * @param signature the bytes to check
- * @param length the number of bytes to check
- * @return true if signature matches the Zstandard or skippable
- * frame magic bytes, false otherwise
- */
- public static boolean matches(final byte[] signature, final int length) {
- if (length < ZSTANDARD_FRAME_MAGIC.length) {
- return false;
- }
-
- boolean isZstandard = true;
- for (int i = 0; i < ZSTANDARD_FRAME_MAGIC.length; ++i) {
- if (signature[i] != ZSTANDARD_FRAME_MAGIC[i]) {
- isZstandard = false;
- break;
- }
- }
- if (isZstandard) {
- return true;
- }
-
- if (0x50 == (signature[0] & 0xF0)) {
- // skippable frame
- for (int i = 0; i < SKIPPABLE_FRAME_MAGIC.length; ++i) {
- if (signature[i + 1] != SKIPPABLE_FRAME_MAGIC[i]) {
- return false;
- }
- }
-
- return true;
- }
-
- return false;
- }
-
- // only exists to support unit tests
- static CachedAvailability getCachedZstdAvailability() {
- return cachedZstdAvailability;
- }
-}
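A small sketch of matches() against the two magic-byte patterns handled above, a regular Zstandard frame and a skippable frame (test vectors written from the constants in this file):

```java
import org.apache.commons.compress.compressors.zstandard.ZstdUtils;

public class ZstdMagicDemo {
    public static void main(String[] args) {
        // Regular Zstandard frame: 28 B5 2F FD
        byte[] frame = { 0x28, (byte) 0xB5, 0x2F, (byte) 0xFD };
        System.out.println(ZstdUtils.matches(frame, frame.length)); // true

        // Skippable frame: 5? 2A 4D 18 (high nibble 0x5, low nibble free)
        byte[] skippable = { 0x53, 0x2A, 0x4D, 0x18 };
        System.out.println(ZstdUtils.matches(skippable, skippable.length)); // true
    }
}
```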
diff --git a/src/org/apache/commons/compress/compressors/zstandard/package.html b/src/org/apache/commons/compress/compressors/zstandard/package.html
deleted file mode 100644
index 6deb74fcf0b..00000000000
--- a/src/org/apache/commons/compress/compressors/zstandard/package.html
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-
-
- <p>Provides stream classes for (de)compressing streams using the
- Zstandard algorithm based
- on Zstandard
- JNI.
-
-
diff --git a/src/org/apache/commons/compress/parallel/FileBasedScatterGatherBackingStore.java b/src/org/apache/commons/compress/parallel/FileBasedScatterGatherBackingStore.java
deleted file mode 100644
index 92b447fabb1..00000000000
--- a/src/org/apache/commons/compress/parallel/FileBasedScatterGatherBackingStore.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.parallel;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.file.Files;
-
-/**
- * ScatterGatherBackingStore that is backed by a file.
- *
- * @since 1.10
- */
-public class FileBasedScatterGatherBackingStore implements ScatterGatherBackingStore {
- private final File target;
- private final OutputStream os;
- private boolean closed;
-
- public FileBasedScatterGatherBackingStore(final File target) throws FileNotFoundException {
- this.target = target;
- try {
- os = Files.newOutputStream(target.toPath());
- } catch (FileNotFoundException ex) {
- throw ex;
- } catch (IOException ex) {
- // must convert exception to stay backwards compatible with Compress 1.10 to 1.13
- throw new RuntimeException(ex); // NOSONAR
- }
- }
-
- @Override
- public InputStream getInputStream() throws IOException {
- return Files.newInputStream(target.toPath());
- }
-
- @Override
- @SuppressWarnings("ResultOfMethodCallIgnored")
- public void closeForWriting() throws IOException {
- if (!closed) {
- os.close();
- closed = true;
- }
- }
-
- @Override
- public void writeOut(final byte[] data, final int offset, final int length) throws IOException {
- os.write(data, offset, length);
- }
-
- @Override
- public void close() throws IOException {
- closeForWriting();
- target.delete();
- }
-}
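A usage sketch of the write-then-read lifecycle defined by ScatterGatherBackingStore, using the file-based implementation above (the temp file name is chosen by the demo):

```java
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.compress.parallel.FileBasedScatterGatherBackingStore;
import org.apache.commons.compress.parallel.ScatterGatherBackingStore;

public class ScatterStoreDemo {
    public static void main(String[] args) throws IOException {
        File tmp = File.createTempFile("scatter", ".tmp");
        try (ScatterGatherBackingStore store = new FileBasedScatterGatherBackingStore(tmp)) {
            byte[] payload = "scattered payload".getBytes(StandardCharsets.UTF_8);
            store.writeOut(payload, 0, payload.length);
            store.closeForWriting(); // flip from the write phase to the read phase
            try (InputStream in = store.getInputStream()) {
                byte[] buf = new byte[payload.length];
                int n = in.read(buf);
                System.out.println(new String(buf, 0, n, StandardCharsets.UTF_8));
            }
        } // close() also deletes the backing temp file
    }
}
```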
diff --git a/src/org/apache/commons/compress/parallel/InputStreamSupplier.java b/src/org/apache/commons/compress/parallel/InputStreamSupplier.java
deleted file mode 100644
index f227e643132..00000000000
--- a/src/org/apache/commons/compress/parallel/InputStreamSupplier.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.parallel;
-
-import java.io.InputStream;
-
-/**
- * Supplies input streams.
- *
- * Implementations are required to support thread-handover. While an instance will
- * not be accessed concurrently by multiple threads, it will be called by
- * a different thread than it was created on.
- *
- * @since 1.10
- */
-public interface InputStreamSupplier {
-
- /**
- * Supply an input stream for a resource.
- * @return the input stream. Should never be null, but may be an empty stream.
- */
- InputStream get();
-}
diff --git a/src/org/apache/commons/compress/parallel/ScatterGatherBackingStore.java b/src/org/apache/commons/compress/parallel/ScatterGatherBackingStore.java
deleted file mode 100644
index ea726ff8a7b..00000000000
--- a/src/org/apache/commons/compress/parallel/ScatterGatherBackingStore.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.parallel;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- *
- * <p>Store intermediate payload in a scatter-gather scenario.
- * Multiple threads write their payload to a backing store, which can
- * subsequently be reversed to an {@link InputStream} to be used as input in the
- * gather phase.
- *
- *
- * <p>It is the responsibility of the allocator of an instance of this class
- * to close this. Closing it should clear off any allocated structures
- * and preferably delete files.
- *
- * @since 1.10
- */
-public interface ScatterGatherBackingStore extends Closeable {
-
- /**
- * An input stream that contains the scattered payload
- *
- * @return An InputStream, should be closed by the caller of this method.
- * @throws IOException when something fails
- */
- InputStream getInputStream() throws IOException;
-
- /**
- * Writes a piece of payload.
- *
- * @param data the data to write
- * @param offset offset inside data to start writing from
- * @param length the amount of data to write
- * @throws IOException when something fails
- */
- void writeOut(byte[] data, int offset, int length) throws IOException;
-
- /**
- * Closes this backing store for further writing.
- * @throws IOException when something fails
- */
- void closeForWriting() throws IOException;
-}
diff --git a/src/org/apache/commons/compress/parallel/ScatterGatherBackingStoreSupplier.java b/src/org/apache/commons/compress/parallel/ScatterGatherBackingStoreSupplier.java
deleted file mode 100644
index 9a216a7b823..00000000000
--- a/src/org/apache/commons/compress/parallel/ScatterGatherBackingStoreSupplier.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.parallel;
-
-import java.io.IOException;
-
-/**
- * Supplies {@link ScatterGatherBackingStore} instances.
- *
- * @since 1.10
- */
-public interface ScatterGatherBackingStoreSupplier {
- /**
- * Create a ScatterGatherBackingStore.
- *
- * @return a ScatterGatherBackingStore, not null
- * @throws IOException when something fails
- */
- ScatterGatherBackingStore get() throws IOException;
-}
diff --git a/src/org/apache/commons/compress/parallel/package.html b/src/org/apache/commons/compress/parallel/package.html
deleted file mode 100644
index 3517bc57526..00000000000
--- a/src/org/apache/commons/compress/parallel/package.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-
-
- <p>Provides common API classes for parallel compression features.
-
-
diff --git a/src/org/apache/commons/compress/utils/ArchiveUtils.java b/src/org/apache/commons/compress/utils/ArchiveUtils.java
deleted file mode 100644
index 3fe3fbadd15..00000000000
--- a/src/org/apache/commons/compress/utils/ArchiveUtils.java
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.utils;
-
-import java.io.UnsupportedEncodingException;
-import java.util.Arrays;
-
-import org.apache.commons.compress.archivers.ArchiveEntry;
-
-/**
- * Generic Archive utilities
- */
-public class ArchiveUtils {
-
- private static final int MAX_SANITIZED_NAME_LENGTH = 255;
-
- /** Private constructor to prevent instantiation of this utility class. */
- private ArchiveUtils(){
- }
-
- /**
- * Generates a string containing the name, isDirectory setting and size of an entry.
- *
- * <p>For example:
- * <pre>
- * - 2000 main.c
- * d 100 testfiles
- * </pre>
- *
- * @param entry the entry
- * @return the representation of the entry
- */
- public static String toString(final ArchiveEntry entry){
- final StringBuilder sb = new StringBuilder();
- sb.append(entry.isDirectory()? 'd' : '-');// c.f. "ls -l" output
- final String size = Long.toString(entry.getSize());
- sb.append(' ');
- // Pad output to 7 places, leading spaces
- for(int i=7; i > size.length(); i--){
- sb.append(' ');
- }
- sb.append(size);
- sb.append(' ').append(entry.getName());
- return sb.toString();
- }
-
- /**
- * Check if buffer contents matches Ascii String.
- *
- * @param expected expected string
- * @param buffer the buffer
- * @param offset offset to read from
- * @param length length of the buffer
- * @return {@code true} if buffer is the same as the expected string
- */
- public static boolean matchAsciiBuffer(
- final String expected, final byte[] buffer, final int offset, final int length){
- byte[] buffer1;
- try {
- buffer1 = expected.getBytes(CharsetNames.US_ASCII);
- } catch (final UnsupportedEncodingException e) {
- // Should not happen
- throw new RuntimeException(e); //NOSONAR
- }
- return isEqual(buffer1, 0, buffer1.length, buffer, offset, length, false);
- }
-
- /**
- * Check if buffer contents matches Ascii String.
- *
- * @param expected the expected string
- * @param buffer the buffer
- * @return {@code true} if buffer is the same as the expected string
- */
- public static boolean matchAsciiBuffer(final String expected, final byte[] buffer){
- return matchAsciiBuffer(expected, buffer, 0, buffer.length);
- }
-
- /**
- * Convert a string to Ascii bytes.
- * Used for comparing "magic" strings which need to be independent of the default Locale.
- *
- * @param inputString string to convert
- * @return the bytes
- */
- public static byte[] toAsciiBytes(final String inputString){
- try {
- return inputString.getBytes(CharsetNames.US_ASCII);
- } catch (final UnsupportedEncodingException e) {
- // Should never happen
- throw new RuntimeException(e); //NOSONAR
- }
- }
-
- /**
- * Convert an input byte array to a String using the ASCII character set.
- *
- * @param inputBytes bytes to convert
- * @return the bytes, interpreted as an Ascii string
- */
- public static String toAsciiString(final byte[] inputBytes){
- try {
- return new String(inputBytes, CharsetNames.US_ASCII);
- } catch (final UnsupportedEncodingException e) {
- // Should never happen
- throw new RuntimeException(e); //NOSONAR
- }
- }
-
- /**
- * Convert an input byte array to a String using the ASCII character set.
- *
- * @param inputBytes input byte array
- * @param offset offset within array
- * @param length length of array
- * @return the bytes, interpreted as an Ascii string
- */
- public static String toAsciiString(final byte[] inputBytes, final int offset, final int length){
- try {
- return new String(inputBytes, offset, length, CharsetNames.US_ASCII);
- } catch (final UnsupportedEncodingException e) {
- // Should never happen
- throw new RuntimeException(e); //NOSONAR
- }
- }
-
- /**
- * Compare byte buffers, optionally ignoring trailing nulls
- *
- * @param buffer1 first buffer
- * @param offset1 first offset
- * @param length1 first length
- * @param buffer2 second buffer
- * @param offset2 second offset
- * @param length2 second length
- * @param ignoreTrailingNulls whether to ignore trailing nulls
- * @return {@code true} if buffer1 and buffer2 have same contents, having regard to trailing nulls
- */
- public static boolean isEqual(
- final byte[] buffer1, final int offset1, final int length1,
- final byte[] buffer2, final int offset2, final int length2,
- final boolean ignoreTrailingNulls){
- final int minLen=length1 < length2 ? length1 : length2;
- for (int i=0; i < minLen; i++){
- if (buffer1[offset1+i] != buffer2[offset2+i]){
- return false;
- }
- }
- if (length1 == length2){
- return true;
- }
- if (ignoreTrailingNulls){
- if (length1 > length2){
- for(int i = length2; i < length1; i++){
- if (buffer1[offset1+i] != 0){
- return false;
- }
- }
- } else {
- for(int i = length1; i < length2; i++){
- if (buffer2[offset2+i] != 0){
- return false;
- }
- }
- }
- return true;
- }
- return false;
- }
-
- /**
- * Compare byte buffers
- *
- * @param buffer1 the first buffer
- * @param offset1 the first offset
- * @param length1 the first length
- * @param buffer2 the second buffer
- * @param offset2 the second offset
- * @param length2 the second length
- * @return {@code true} if buffer1 and buffer2 have same contents
- */
- public static boolean isEqual(
- final byte[] buffer1, final int offset1, final int length1,
- final byte[] buffer2, final int offset2, final int length2){
- return isEqual(buffer1, offset1, length1, buffer2, offset2, length2, false);
- }
-
- /**
- * Compare byte buffers
- *
- * @param buffer1 the first buffer
- * @param buffer2 the second buffer
- * @return {@code true} if buffer1 and buffer2 have same contents
- */
- public static boolean isEqual(final byte[] buffer1, final byte[] buffer2 ){
- return isEqual(buffer1, 0, buffer1.length, buffer2, 0, buffer2.length, false);
- }
-
- /**
- * Compare byte buffers, optionally ignoring trailing nulls
- *
- * @param buffer1 the first buffer
- * @param buffer2 the second buffer
- * @param ignoreTrailingNulls whether to ignore trailing nulls
- * @return {@code true} if buffer1 and buffer2 have same contents
- */
- public static boolean isEqual(final byte[] buffer1, final byte[] buffer2, final boolean ignoreTrailingNulls){
- return isEqual(buffer1, 0, buffer1.length, buffer2, 0, buffer2.length, ignoreTrailingNulls);
- }
-
- /**
- * Compare byte buffers, ignoring trailing nulls
- *
- * @param buffer1 the first buffer
- * @param offset1 the first offset
- * @param length1 the first length
- * @param buffer2 the second buffer
- * @param offset2 the second offset
- * @param length2 the second length
- * @return {@code true} if buffer1 and buffer2 have same contents, having regard to trailing nulls
- */
- public static boolean isEqualWithNull(
- final byte[] buffer1, final int offset1, final int length1,
- final byte[] buffer2, final int offset2, final int length2){
- return isEqual(buffer1, offset1, length1, buffer2, offset2, length2, true);
- }
-
- /**
- * Returns true if the first N bytes of an array are all zero
- *
- * @param a
- * The array to check
- * @param size
- * The number of characters to check (not the size of the array)
- * @return true if the first N bytes are zero
- */
- public static boolean isArrayZero(final byte[] a, final int size) {
- for (int i = 0; i < size; i++) {
- if (a[i] != 0) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Returns a "sanitized" version of the string given as arguments,
- * where sanitized means non-printable characters have been
- * replaced with a question mark and the outcome is not longer
- * than 255 chars.
- *
- *
- * <p>This method is used to clean up file names when they are
- * used in exception messages as they may end up in log files or
- * as console output and may have been read from a corrupted
- * input.
- *
- * @param s the string to sanitize
- * @return a sanitized version of the argument
- * @since Compress 1.12
- */
- public static String sanitize(final String s) {
- final char[] cs = s.toCharArray();
- final char[] chars = cs.length <= MAX_SANITIZED_NAME_LENGTH ? cs : Arrays.copyOf(cs, MAX_SANITIZED_NAME_LENGTH);
- if (cs.length > MAX_SANITIZED_NAME_LENGTH) {
- for (int i = MAX_SANITIZED_NAME_LENGTH - 3; i < MAX_SANITIZED_NAME_LENGTH; i++) {
- chars[i] = '.';
- }
- }
- final StringBuilder sb = new StringBuilder();
- for (final char c : chars) {
- if (!Character.isISOControl(c)) {
- final Character.UnicodeBlock block = Character.UnicodeBlock.of(c);
- if (block != null && block != Character.UnicodeBlock.SPECIALS) {
- sb.append(c);
- continue;
- }
- }
- sb.append('?');
- }
- return sb.toString();
- }
-
-}
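A few worked calls against the helpers above, with expected results in comments (the "ustar" header field is an illustrative tar-style example):

```java
import org.apache.commons.compress.utils.ArchiveUtils;

public class ArchiveUtilsDemo {
    public static void main(String[] args) {
        byte[] magic = ArchiveUtils.toAsciiBytes("ustar");
        byte[] header = { 'u', 's', 't', 'a', 'r', 0, 0 };
        // trailing nulls ignored, so the padded header field still matches
        System.out.println(ArchiveUtils.isEqual(magic, header, true));            // true
        System.out.println(ArchiveUtils.matchAsciiBuffer("ustar", header, 0, 5)); // true
        // control characters are replaced before names reach log output
        System.out.println(ArchiveUtils.sanitize("bad\u0007name.txt"));           // bad?name.txt
    }
}
```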
diff --git a/src/org/apache/commons/compress/utils/BitInputStream.java b/src/org/apache/commons/compress/utils/BitInputStream.java
deleted file mode 100644
index 8abff5f786a..00000000000
--- a/src/org/apache/commons/compress/utils/BitInputStream.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.utils;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteOrder;
-
-/**
- * Reads bits from an InputStream.
- * @since 1.10
- * @NotThreadSafe
- */
-public class BitInputStream implements Closeable {
- private static final int MAXIMUM_CACHE_SIZE = 63; // bits in long minus sign bit
- private static final long[] MASKS = new long[MAXIMUM_CACHE_SIZE + 1];
-
- static {
- for (int i = 1; i <= MAXIMUM_CACHE_SIZE; i++) {
- MASKS[i] = (MASKS[i - 1] << 1) + 1;
- }
- }
-
- private final CountingInputStream in;
- private final ByteOrder byteOrder;
- private long bitsCached = 0;
- private int bitsCachedSize = 0;
-
- /**
- * Constructor taking an InputStream and its bit arrangement.
- * @param in the InputStream
- * @param byteOrder the bit arrangement across byte boundaries,
- * either BIG_ENDIAN (aaaaabbb bb000000) or LITTLE_ENDIAN (bbbaaaaa 000000bb)
- */
- public BitInputStream(final InputStream in, final ByteOrder byteOrder) {
- this.in = new CountingInputStream(in);
- this.byteOrder = byteOrder;
- }
-
- @Override
- public void close() throws IOException {
- in.close();
- }
-
- /**
- * Clears the cache of bits that have been read from the
- * underlying stream but not yet provided via {@link #readBits}.
- */
- public void clearBitCache() {
- bitsCached = 0;
- bitsCachedSize = 0;
- }
-
- /**
- * Returns at most 63 bits read from the underlying stream.
- *
- * @param count the number of bits to read, must be a positive
- * number not bigger than 63.
- * @return the bits concatenated as a long using the stream's byte order.
- * -1 if the end of the underlying stream has been reached before reading
- * the requested number of bits
- * @throws IOException on error
- */
- public long readBits(final int count) throws IOException {
- if (count < 0 || count > MAXIMUM_CACHE_SIZE) {
- throw new IllegalArgumentException("count must not be negative or greater than " + MAXIMUM_CACHE_SIZE);
- }
- if (ensureCache(count)) {
- return -1;
- }
-
- if (bitsCachedSize < count) {
- return processBitsGreater57(count);
- }
- return readCachedBits(count);
- }
-
- /**
- * Returns the number of bits that can be read from this input
- * stream without reading from the underlying input stream at all.
- * @return estimate of the number of bits that can be read without reading from the underlying stream
- * @since 1.16
- */
- public int bitsCached() {
- return bitsCachedSize;
- }
-
- /**
- * Returns an estimate of the number of bits that can be read from
- * this input stream without blocking by the next invocation of a
- * method for this input stream.
- * @throws IOException if the underlying stream throws one when calling available
- * @return estimate of the number of bits that can be read without blocking
- * @since 1.16
- */
- public long bitsAvailable() throws IOException {
- return bitsCachedSize + ((long) Byte.SIZE) * in.available();
- }
-
- /**
- * Drops bits until the next bits will be read from a byte boundary.
- * @since 1.16
- */
- public void alignWithByteBoundary() {
- int toSkip = bitsCachedSize % Byte.SIZE;
- if (toSkip > 0) {
- readCachedBits(toSkip);
- }
- }
-
- /**
- * Returns the number of bytes read from the underlying stream.
- *
- * <p>This includes the bytes read to fill the current cache and
- * not read as bits so far.</p>
- * @return the number of bytes read from the underlying stream
- * @since 1.17
- */
- public long getBytesRead() {
- return in.getBytesRead();
- }
-
- private long processBitsGreater57(final int count) throws IOException {
- final long bitsOut;
- int overflowBits = 0;
- long overflow = 0L;
-
- // bitsCachedSize >= 57 and left-shifting it 8 bits would cause an overflow
- int bitsToAddCount = count - bitsCachedSize;
- overflowBits = Byte.SIZE - bitsToAddCount;
- final long nextByte = in.read();
- if (nextByte < 0) {
- return nextByte;
- }
- if (byteOrder == ByteOrder.LITTLE_ENDIAN) {
- long bitsToAdd = nextByte & MASKS[bitsToAddCount];
- bitsCached |= (bitsToAdd << bitsCachedSize);
- overflow = (nextByte >>> bitsToAddCount) & MASKS[overflowBits];
- } else {
- bitsCached <<= bitsToAddCount;
- long bitsToAdd = (nextByte >>> (overflowBits)) & MASKS[bitsToAddCount];
- bitsCached |= bitsToAdd;
- overflow = nextByte & MASKS[overflowBits];
- }
- bitsOut = bitsCached & MASKS[count];
- bitsCached = overflow;
- bitsCachedSize = overflowBits;
- return bitsOut;
- }
-
- private long readCachedBits(int count) {
- final long bitsOut;
- if (byteOrder == ByteOrder.LITTLE_ENDIAN) {
- bitsOut = (bitsCached & MASKS[count]);
- bitsCached >>>= count;
- } else {
- bitsOut = (bitsCached >> (bitsCachedSize - count)) & MASKS[count];
- }
- bitsCachedSize -= count;
- return bitsOut;
- }
-
- /**
- * Fills the cache up to 56 bits.
- * @param count the number of bits the caller wants to have available
- * @return true if the end of the underlying stream was reached before the cache could be filled
- * @throws IOException if reading from the underlying stream fails
- */
- private boolean ensureCache(final int count) throws IOException {
- while (bitsCachedSize < count && bitsCachedSize < 57) {
- final long nextByte = in.read();
- if (nextByte < 0) {
- return true;
- }
- if (byteOrder == ByteOrder.LITTLE_ENDIAN) {
- bitsCached |= (nextByte << bitsCachedSize);
- } else {
- bitsCached <<= Byte.SIZE;
- bitsCached |= nextByte;
- }
- bitsCachedSize += Byte.SIZE;
- }
- return false;
- }
-
-}
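With the bundled copy gone, BitInputStream is provided by the Ivy-resolved commons-compress jar. A small sketch of the bit-reading contract described above (the byte value is arbitrary, chosen only to show the little-endian split; assumes an enclosing method that declares IOException):

    import java.io.ByteArrayInputStream;
    import java.nio.ByteOrder;
    import org.apache.commons.compress.utils.BitInputStream;

    // 0b10110100 read LITTLE_ENDIAN: the lowest bits are handed out first.
    try (BitInputStream bits = new BitInputStream(
            new ByteArrayInputStream(new byte[] {(byte) 0b10110100}),
            ByteOrder.LITTLE_ENDIAN)) {
        long low = bits.readBits(3);   // 0b100 = 4
        long high = bits.readBits(5);  // 0b10110 = 22
    }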
diff --git a/src/org/apache/commons/compress/utils/BoundedInputStream.java b/src/org/apache/commons/compress/utils/BoundedInputStream.java
deleted file mode 100644
index 8c3465ded31..00000000000
--- a/src/org/apache/commons/compress/utils/BoundedInputStream.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.utils;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * A stream that limits reading from a wrapped stream to a given number of bytes.
- * @NotThreadSafe
- * @since 1.6
- */
-public class BoundedInputStream extends InputStream {
- private final InputStream in;
- private long bytesRemaining;
-
- /**
- * Creates the stream that will at most read the given amount of
- * bytes from the given stream.
- * @param in the stream to read from
- * @param size the maximum amount of bytes to read
- */
- public BoundedInputStream(final InputStream in, final long size) {
- this.in = in;
- bytesRemaining = size;
- }
-
- @Override
- public int read() throws IOException {
- if (bytesRemaining > 0) {
- --bytesRemaining;
- return in.read();
- }
- return -1;
- }
-
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- if (bytesRemaining == 0) {
- return -1;
- }
- int bytesToRead = len;
- if (bytesToRead > bytesRemaining) {
- bytesToRead = (int) bytesRemaining;
- }
- final int bytesRead = in.read(b, off, bytesToRead);
- if (bytesRead >= 0) {
- bytesRemaining -= bytesRead;
- }
- return bytesRead;
- }
-
- @Override
- public void close() {
- // there isn't anything to close in this stream and the nested
- // stream is controlled externally
- }
-}
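A sketch of the bound-then-EOF behaviour documented above (data and bound are arbitrary; assumes an enclosing method that declares IOException):

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.compress.utils.BoundedInputStream;

    InputStream raw = new ByteArrayInputStream("0123456789".getBytes(StandardCharsets.US_ASCII));
    InputStream first4 = new BoundedInputStream(raw, 4);
    int b;
    while ((b = first4.read()) != -1) {
        System.out.print((char) b);  // prints 0123; close() leaves raw open
    }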
diff --git a/src/org/apache/commons/compress/utils/ByteUtils.java b/src/org/apache/commons/compress/utils/ByteUtils.java
deleted file mode 100644
index 85b4118e972..00000000000
--- a/src/org/apache/commons/compress/utils/ByteUtils.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.utils;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-/**
- * Utility methods for reading and writing bytes.
- * @since 1.14
- */
-public final class ByteUtils {
- private ByteUtils() { /* no instances */ }
-
- /**
- * Used to supply bytes.
- * @since 1.14
- */
- public interface ByteSupplier {
- /**
- * The contract is similar to {@link InputStream#read()}, return
- * the byte as an unsigned int, -1 if there are no more bytes.
- * @return the supplied byte or -1 if there are no more bytes
- * @throws IOException if supplying fails
- */
- int getAsByte() throws IOException;
- }
-
- /**
- * Used to consume bytes.
- * @since 1.14
- */
- public interface ByteConsumer {
- /**
- * The contract is similar to {@link OutputStream#write(int)},
- * consume the lower eight bits of the int as a byte.
- * @param b the byte to consume
- * @throws IOException if consuming fails
- */
- void accept(int b) throws IOException;
- }
-
- /**
- * Reads the given byte array as a little endian long.
- * @param bytes the byte array to convert
- * @return the number read
- */
- public static long fromLittleEndian(byte[] bytes) {
- return fromLittleEndian(bytes, 0, bytes.length);
- }
-
- /**
- * Reads the given byte array as a little endian long.
- * @param bytes the byte array to convert
- * @param off the offset into the array that starts the value
- * @param length the number of bytes representing the value
- * @return the number read
- * @throws IllegalArgumentException if length is bigger than eight
- */
- public static long fromLittleEndian(byte[] bytes, final int off, final int length) {
- checkReadLength(length);
- long l = 0;
- for (int i = 0; i < length; i++) {
- l |= (bytes[off + i] & 0xffL) << (8 * i);
- }
- return l;
- }
-
- /**
- * Reads the given number of bytes from the given stream as a little endian long.
- * @param in the stream to read from
- * @param length the number of bytes representing the value
- * @return the number read
- * @throws IllegalArgumentException if length is bigger than eight
- * @throws IOException if reading fails or the stream doesn't
- * contain the given number of bytes anymore
- */
- public static long fromLittleEndian(InputStream in, int length) throws IOException {
- // somewhat duplicates the ByteSupplier version in order to save the creation of a wrapper object
- checkReadLength(length);
- long l = 0;
- for (int i = 0; i < length; i++) {
- long b = in.read();
- if (b == -1) {
- throw new IOException("premature end of data");
- }
- l |= (b << (i * 8));
- }
- return l;
- }
-
- /**
- * Reads the given number of bytes from the given supplier as a little endian long.
- *
- * <p>Typically used by our InputStreams that need to count the
- * bytes read as well.</p>
- *
- * @param supplier the supplier for bytes
- * @param length the number of bytes representing the value
- * @return the number read
- * @throws IllegalArgumentException if length is bigger than eight
- * @throws IOException if the supplier fails or doesn't supply the
- * given number of bytes anymore
- */
- public static long fromLittleEndian(ByteSupplier supplier, final int length) throws IOException {
- checkReadLength(length);
- long l = 0;
- for (int i = 0; i < length; i++) {
- long b = supplier.getAsByte();
- if (b == -1) {
- throw new IOException("premature end of data");
- }
- l |= (b << (i * 8));
- }
- return l;
- }
-
- /**
- * Reads the given number of bytes from the given input as little endian long.
- * @param in the input to read from
- * @param length the number of bytes representing the value
- * @return the number read
- * @throws IllegalArgumentException if length is bigger than eight
- * @throws IOException if reading fails or the stream doesn't
- * contain the given number of bytes anymore
- */
- public static long fromLittleEndian(DataInput in, int length) throws IOException {
- // somewhat duplicates the ByteSupplier version in order to save the creation of a wrapper object
- checkReadLength(length);
- long l = 0;
- for (int i = 0; i < length; i++) {
- long b = in.readUnsignedByte();
- l |= (b << (i * 8));
- }
- return l;
- }
-
- /**
- * Inserts the given value into the array as a little endian
- * sequence of the given length starting at the given offset.
- * @param b the array to write into
- * @param value the value to insert
- * @param off the offset into the array that receives the first byte
- * @param length the number of bytes to use to represent the value
- */
- public static void toLittleEndian(final byte[] b, final long value, final int off, final int length) {
- long num = value;
- for (int i = 0; i < length; i++) {
- b[off + i] = (byte) (num & 0xff);
- num >>= 8;
- }
- }
-
- /**
- * Writes the given value to the given stream as a little endian
- * array of the given length.
- * @param out the stream to write to
- * @param value the value to write
- * @param length the number of bytes to use to represent the value
- * @throws IOException if writing fails
- */
- public static void toLittleEndian(OutputStream out, final long value, final int length)
- throws IOException {
- // somewhat duplicates the ByteConsumer version in order to save the creation of a wrapper object
- long num = value;
- for (int i = 0; i < length; i++) {
- out.write((int) (num & 0xff));
- num >>= 8;
- }
- }
-
- /**
- * Provides the given value to the given consumer as a little endian
- * sequence of the given length.
- * @param consumer the consumer to provide the bytes to
- * @param value the value to provide
- * @param length the number of bytes to use to represent the value
- * @throws IOException if writing fails
- */
- public static void toLittleEndian(ByteConsumer consumer, final long value, final int length)
- throws IOException {
- long num = value;
- for (int i = 0; i < length; i++) {
- consumer.accept((int) (num & 0xff));
- num >>= 8;
- }
- }
-
- /**
- * Writes the given value to the given stream as a little endian
- * array of the given length.
- * @param out the output to write to
- * @param value the value to write
- * @param length the number of bytes to use to represent the value
- * @throws IOException if writing fails
- */
- public static void toLittleEndian(DataOutput out, final long value, final int length)
- throws IOException {
- // somewhat duplicates the ByteConsumer version in order to save the creation of a wrapper object
- long num = value;
- for (int i = 0; i < length; i++) {
- out.write((int) (num & 0xff));
- num >>= 8;
- }
- }
-
- /**
- * {@link ByteSupplier} based on {@link InputStream}.
- * @since 1.14
- */
- public static class InputStreamByteSupplier implements ByteSupplier {
- private final InputStream is;
- public InputStreamByteSupplier(InputStream is) {
- this.is = is;
- }
- @Override
- public int getAsByte() throws IOException {
- return is.read();
- }
- }
-
- /**
- * {@link ByteConsumer} based on {@link OutputStream}.
- * @since 1.14
- */
- public static class OutputStreamByteConsumer implements ByteConsumer {
- private final OutputStream os;
- public OutputStreamByteConsumer(OutputStream os) {
- this.os = os;
- }
- @Override
- public void accept(int b) throws IOException {
- os.write(b);
- }
- }
-
- private static final void checkReadLength(int length) {
- if (length > 8) {
- throw new IllegalArgumentException("can't read more than eight bytes into a long value");
- }
- }
-}
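A round-trip sketch for the little-endian helpers above (the value is arbitrary):

    import org.apache.commons.compress.utils.ByteUtils;

    byte[] buf = new byte[4];
    // Low byte first: buf becomes {0xBE, 0xBA, 0xFE, 0xCA}.
    ByteUtils.toLittleEndian(buf, 0xCAFEBABEL, 0, 4);
    long back = ByteUtils.fromLittleEndian(buf, 0, 4); // 0xCAFEBABEL again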
diff --git a/src/org/apache/commons/compress/utils/CRC32VerifyingInputStream.java b/src/org/apache/commons/compress/utils/CRC32VerifyingInputStream.java
deleted file mode 100644
index 1b50e8ce572..00000000000
--- a/src/org/apache/commons/compress/utils/CRC32VerifyingInputStream.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.utils;
-
-import java.io.InputStream;
-import java.util.zip.CRC32;
-
-/**
- * A stream that verifies the CRC of the data read once the stream is
- * exhausted.
- * @NotThreadSafe
- * @since 1.6
- */
-public class CRC32VerifyingInputStream extends ChecksumVerifyingInputStream {
-
- /**
- * @param in the stream to wrap
- * @param size the size of the stream's content
- * @param expectedCrc32 the expected checksum
- */
- public CRC32VerifyingInputStream(final InputStream in, final long size, final int expectedCrc32) {
- this(in, size, expectedCrc32 & 0xFFFFffffL);
- }
-
- /**
- * @since 1.7
- * @param in the stream to wrap
- * @param size the size of the stream's content
- * @param expectedCrc32 the expected checksum
- */
- public CRC32VerifyingInputStream(final InputStream in, final long size, final long expectedCrc32) {
- super(new CRC32(), in, size, expectedCrc32);
- }
-
-}
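Verification only fires once the declared size has been consumed, so callers must drain the stream. A sketch (payload invented, checksum computed to match; assumes an enclosing method that declares IOException):

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.CRC32;
    import org.apache.commons.compress.utils.CRC32VerifyingInputStream;

    byte[] data = "payload".getBytes(StandardCharsets.US_ASCII);
    CRC32 crc = new CRC32();
    crc.update(data, 0, data.length);
    try (InputStream in = new CRC32VerifyingInputStream(
            new ByteArrayInputStream(data), data.length, crc.getValue())) {
        while (in.read() != -1) {
            // a corrupted byte would make the final read throw IOException
        }
    }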
diff --git a/src/org/apache/commons/compress/utils/CharsetNames.java b/src/org/apache/commons/compress/utils/CharsetNames.java
deleted file mode 100644
index f6b9cc8ed18..00000000000
--- a/src/org/apache/commons/compress/utils/CharsetNames.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.utils;
-
-/**
- * Character encoding names required of every implementation of the Java platform.
- *
- * From the Java documentation on standard charsets:
- * <p>
- * <cite>Every implementation of the Java platform is required to support the following character encodings. Consult the
- * release documentation for your implementation to see if any other encodings are supported.</cite>
- * </p>
- *
- * <dl>
- * <dt>US-ASCII</dt>
- * <dd>Seven-bit ASCII, a.k.a. ISO646-US, a.k.a. the Basic Latin block of the Unicode character set.</dd>
- * <dt>ISO-8859-1</dt>
- * <dd>ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.</dd>
- * <dt>UTF-8</dt>
- * <dd>Eight-bit Unicode Transformation Format.</dd>
- * <dt>UTF-16BE</dt>
- * <dd>Sixteen-bit Unicode Transformation Format, big-endian byte order.</dd>
- * <dt>UTF-16LE</dt>
- * <dd>Sixteen-bit Unicode Transformation Format, little-endian byte order.</dd>
- * <dt>UTF-16</dt>
- * <dd>Sixteen-bit Unicode Transformation Format, byte order specified by a mandatory initial byte-order mark (either order
- * accepted on input, big-endian used on output.)</dd>
- * </dl>
- *
- * <p>This perhaps would best belong in the [lang] project. Even if a similar interface is defined in [lang], it is not
- * foreseen that [compress] would be made to depend on [lang].</p>
- *
- * @see Standard charsets
- * @since 1.4
- */
-public class CharsetNames {
- /**
- * ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- */
- public static final String ISO_8859_1 = "ISO-8859-1";
-
- /**
- *
- * Seven-bit ASCII, also known as ISO646-US, also known as the Basic Latin block of the Unicode character set.
- *
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- */
- public static final String US_ASCII = "US-ASCII";
-
- /**
- *
- * Sixteen-bit Unicode Transformation Format, The byte order specified by a mandatory initial byte-order mark
- * (either order accepted on input, big-endian used on output)
- *
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- */
- public static final String UTF_16 = "UTF-16";
-
- /**
- *
- * Sixteen-bit Unicode Transformation Format, little-endian byte order.
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- */
- public static final String UTF_16LE = "UTF-16LE";
-
- /**
- *
- * Eight-bit Unicode Transformation Format.
- *
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- */
- public static final String UTF_8 = "UTF-8";
-}
diff --git a/src/org/apache/commons/compress/utils/Charsets.java b/src/org/apache/commons/compress/utils/Charsets.java
deleted file mode 100644
index 9f5240af89f..00000000000
--- a/src/org/apache/commons/compress/utils/Charsets.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.utils;
-
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-
-/**
- * Charsets required of every implementation of the Java platform.
- *
- * From the Java documentation on standard charsets:
- * <p>
- * <cite>Every implementation of the Java platform is required to support the following character encodings. Consult the
- * release documentation for your implementation to see if any other encodings are supported.</cite>
- * </p>
- *
- * <dl>
- * <dt>US-ASCII</dt>
- * <dd>Seven-bit ASCII, a.k.a. ISO646-US, a.k.a. the Basic Latin block of the Unicode character set.</dd>
- * <dt>ISO-8859-1</dt>
- * <dd>ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.</dd>
- * <dt>UTF-8</dt>
- * <dd>Eight-bit Unicode Transformation Format.</dd>
- * <dt>UTF-16BE</dt>
- * <dd>Sixteen-bit Unicode Transformation Format, big-endian byte order.</dd>
- * <dt>UTF-16LE</dt>
- * <dd>Sixteen-bit Unicode Transformation Format, little-endian byte order.</dd>
- * <dt>UTF-16</dt>
- * <dd>Sixteen-bit Unicode Transformation Format, byte order specified by a mandatory initial byte-order mark (either order
- * accepted on input, big-endian used on output.)</dd>
- * </dl>
- *
- * <p>This class best belongs in the Commons Lang or IO project. Even if a similar class is defined in another Commons
- * component, it is not foreseen that Commons Compress would be made to depend on another Commons component.</p>
- *
- * @see Standard charsets
- * @see StandardCharsets
- * @since 1.4
- */
-public class Charsets {
-
- //
- // This class should only contain Charset instances for required encodings. This guarantees that it will load correctly and
- // without delay on all Java platforms.
- //
-
- /**
- * Returns the given Charset or the default Charset if the given Charset is null.
- *
- * @param charset
- * A charset or null.
- * @return the given Charset or the default Charset if the given Charset is null
- */
- public static Charset toCharset(final Charset charset) {
- return charset == null ? Charset.defaultCharset() : charset;
- }
-
- /**
- * Returns a Charset for the named charset. If the name is null, return the default Charset.
- *
- * @param charset
- * The name of the requested charset, may be null.
- * @return a Charset for the named charset
- * @throws java.nio.charset.UnsupportedCharsetException
- * If the named charset is unavailable
- * @throws java.nio.charset.IllegalCharsetNameException
- * If the given charset name is illegal
- */
- public static Charset toCharset(final String charset) {
- return charset == null ? Charset.defaultCharset() : Charset.forName(charset);
- }
-
- /**
- * ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- * @deprecated replaced by {@link StandardCharsets} in Java 7
- */
- public static final Charset ISO_8859_1 = StandardCharsets.ISO_8859_1;
-
- /**
- *
- * Seven-bit ASCII, also known as ISO646-US, also known as the Basic Latin block of the Unicode character set.
- *
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- * @deprecated replaced by {@link StandardCharsets} in Java 7
- */
- public static final Charset US_ASCII = StandardCharsets.US_ASCII;
-
- /**
- *
- * Sixteen-bit Unicode Transformation Format, The byte order specified by a mandatory initial byte-order mark
- * (either order accepted on input, big-endian used on output)
- *
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- * @deprecated replaced by {@link StandardCharsets} in Java 7
- */
- public static final Charset UTF_16 = StandardCharsets.UTF_16;
-
- /**
- *
- * Sixteen-bit Unicode Transformation Format, little-endian byte order.
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- * @deprecated replaced by {@link StandardCharsets} in Java 7
- */
- public static final Charset UTF_16LE = StandardCharsets.UTF_16LE;
-
- /**
- *
- * Eight-bit Unicode Transformation Format.
- *
- *
- * Every implementation of the Java platform is required to support this character encoding.
- *
- *
- * @see Standard charsets
- * @deprecated replaced by {@link StandardCharsets} in Java 7
- */
- public static final Charset UTF_8 = StandardCharsets.UTF_8;
-}
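The two toCharset() overloads above are null-safe lookups; a short sketch (the cast disambiguates the overloads):

    import java.nio.charset.Charset;
    import org.apache.commons.compress.utils.Charsets;

    Charset byName = Charsets.toCharset("UTF-8");          // same as StandardCharsets.UTF_8
    Charset fallback = Charsets.toCharset((Charset) null); // platform default charset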
diff --git a/src/org/apache/commons/compress/utils/ChecksumCalculatingInputStream.java b/src/org/apache/commons/compress/utils/ChecksumCalculatingInputStream.java
deleted file mode 100644
index 4a408a564ce..00000000000
--- a/src/org/apache/commons/compress/utils/ChecksumCalculatingInputStream.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.utils;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.zip.Checksum;
-
-/**
- * A stream that calculates the checksum of the data read.
- * @NotThreadSafe
- * @since 1.14
- */
-public class ChecksumCalculatingInputStream extends InputStream {
- private final InputStream in;
- private final Checksum checksum;
-
- public ChecksumCalculatingInputStream(final Checksum checksum, final InputStream in) {
-
- if ( checksum == null ){
- throw new NullPointerException("Parameter checksum must not be null");
- }
-
- if ( in == null ){
- throw new NullPointerException("Parameter in must not be null");
- }
-
- this.checksum = checksum;
- this.in = in;
- }
-
- /**
- * Reads a single byte from the stream
- * @throws IOException if the underlying stream throws or the
- * stream is exhausted and the Checksum doesn't match the expected
- * value
- */
- @Override
- public int read() throws IOException {
- final int ret = in.read();
- if (ret >= 0) {
- checksum.update(ret);
- }
- return ret;
- }
-
- /**
- * Reads a byte array from the stream
- * @throws IOException if the underlying stream throws or the
- * stream is exhausted and the Checksum doesn't match the expected
- * value
- */
- @Override
- public int read(final byte[] b) throws IOException {
- return read(b, 0, b.length);
- }
-
- /**
- * Reads from the stream into a byte array.
- * @throws IOException if the underlying stream throws or the
- * stream is exhausted and the Checksum doesn't match the expected
- * value
- */
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- final int ret = in.read(b, off, len);
- if (ret >= 0) {
- checksum.update(b, off, ret);
- }
- return ret;
- }
-
- @Override
- public long skip(final long n) throws IOException {
- // Can't really skip, we have to hash everything to verify the checksum
- if (read() >= 0) {
- return 1;
- }
- return 0;
- }
-
- /**
- * Returns the calculated checksum.
- * @return the calculated checksum.
- */
- public long getValue() {
- return checksum.getValue();
- }
-
-}
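Unlike the verifying variant that follows, this class only computes. A sketch of accumulating a CRC32 while reading (input bytes arbitrary; assumes an enclosing method that declares IOException):

    import java.io.ByteArrayInputStream;
    import java.util.zip.CRC32;
    import org.apache.commons.compress.utils.ChecksumCalculatingInputStream;

    ChecksumCalculatingInputStream in = new ChecksumCalculatingInputStream(
            new CRC32(), new ByteArrayInputStream(new byte[] {1, 2, 3}));
    while (in.read() != -1) {
        // every byte read is fed into the checksum
    }
    long crc32 = in.getValue(); // checksum of all bytes consumed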
diff --git a/src/org/apache/commons/compress/utils/ChecksumVerifyingInputStream.java b/src/org/apache/commons/compress/utils/ChecksumVerifyingInputStream.java
deleted file mode 100644
index a7d8d6ce511..00000000000
--- a/src/org/apache/commons/compress/utils/ChecksumVerifyingInputStream.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.commons.compress.utils;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.zip.Checksum;
-
-/**
- * A stream that verifies the checksum of the data read once the stream is
- * exhausted.
- * @NotThreadSafe
- * @since 1.7
- */
-public class ChecksumVerifyingInputStream extends InputStream {
- private final InputStream in;
- private long bytesRemaining;
- private final long expectedChecksum;
- private final Checksum checksum;
-
- public ChecksumVerifyingInputStream(final Checksum checksum, final InputStream in,
- final long size, final long expectedChecksum) {
- this.checksum = checksum;
- this.in = in;
- this.expectedChecksum = expectedChecksum;
- this.bytesRemaining = size;
- }
-
- /**
- * Reads a single byte from the stream
- * @throws IOException if the underlying stream throws or the
- * stream is exhausted and the Checksum doesn't match the expected
- * value
- */
- @Override
- public int read() throws IOException {
- if (bytesRemaining <= 0) {
- return -1;
- }
- final int ret = in.read();
- if (ret >= 0) {
- checksum.update(ret);
- --bytesRemaining;
- }
- if (bytesRemaining == 0 && expectedChecksum != checksum.getValue()) {
- throw new IOException("Checksum verification failed");
- }
- return ret;
- }
-
- /**
- * Reads a byte array from the stream
- * @throws IOException if the underlying stream throws or the
- * stream is exhausted and the Checksum doesn't match the expected
- * value
- */
- @Override
- public int read(final byte[] b) throws IOException {
- return read(b, 0, b.length);
- }
-
- /**
- * Reads from the stream into a byte array.
- * @throws IOException if the underlying stream throws or the
- * stream is exhausted and the Checksum doesn't match the expected
- * value
- */
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- final int ret = in.read(b, off, len);
- if (ret >= 0) {
- checksum.update(b, off, ret);
- bytesRemaining -= ret;
- }
- if (bytesRemaining <= 0 && expectedChecksum != checksum.getValue()) {
- throw new IOException("Checksum verification failed");
- }
- return ret;
- }
-
- @Override
- public long skip(final long n) throws IOException {
- // Can't really skip, we have to hash everything to verify the checksum
- if (read() >= 0) {
- return 1;
- }
- return 0;
- }
-
- @Override
- public void close() throws IOException {
- in.close();
- }
-}
diff --git a/src/org/apache/commons/compress/utils/CloseShieldFilterInputStream.java b/src/org/apache/commons/compress/utils/CloseShieldFilterInputStream.java
deleted file mode 100644
index a0ec8ff4b51..00000000000
--- a/src/org/apache/commons/compress/utils/CloseShieldFilterInputStream.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.utils;
-
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * Re-implements {@link FilterInputStream#close()} to do nothing.
- * @since 1.14
- */
-public class CloseShieldFilterInputStream extends FilterInputStream {
-
- public CloseShieldFilterInputStream(InputStream in) {
- super(in);
- }
-
- @Override
- public void close() throws IOException {
- // NO IMPLEMENTATION.
- }
-
-}
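Typical use is handing a stream to code that closes it eagerly while the caller still needs it. A sketch (ZipInputStream stands in for any close-happy consumer; the method name is invented):

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.zip.ZipInputStream;
    import org.apache.commons.compress.utils.CloseShieldFilterInputStream;

    void peekFirstEntry(InputStream raw) throws IOException {
        try (ZipInputStream zip = new ZipInputStream(new CloseShieldFilterInputStream(raw))) {
            zip.getNextEntry(); // inspect without losing the outer stream
        }
        // raw is still open here despite zip.close()
    }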
diff --git a/src/org/apache/commons/compress/utils/CountingInputStream.java b/src/org/apache/commons/compress/utils/CountingInputStream.java
deleted file mode 100644
index 461071e8dce..00000000000
--- a/src/org/apache/commons/compress/utils/CountingInputStream.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.utils;
-
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * Stream that tracks the number of bytes read.
- * @since 1.3
- * @NotThreadSafe
- */
-public class CountingInputStream extends FilterInputStream {
- private long bytesRead;
-
- public CountingInputStream(final InputStream in) {
- super(in);
- }
-
- @Override
- public int read() throws IOException {
- final int r = in.read();
- if (r >= 0) {
- count(1);
- }
- return r;
- }
- @Override
- public int read(final byte[] b) throws IOException {
- return read(b, 0, b.length);
- }
- @Override
- public int read(final byte[] b, final int off, final int len) throws IOException {
- final int r = in.read(b, off, len);
- if (r >= 0) {
- count(r);
- }
- return r;
- }
- /**
- * Increments the counter of already read bytes.
- * Doesn't increment if the EOF has been hit (read == -1)
- *
- * @param read the number of bytes read
- */
- protected final void count(final long read) {
- if (read != -1) {
- bytesRead += read;
- }
- }
-
- /**
- * Returns the current number of bytes read from this stream.
- * @return the number of read bytes
- */
- public long getBytesRead() {
- return bytesRead;
- }
-}
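A progress-tracking sketch (sizes arbitrary; assumes an enclosing method that declares IOException):

    import java.io.ByteArrayInputStream;
    import org.apache.commons.compress.utils.CountingInputStream;

    CountingInputStream in = new CountingInputStream(new ByteArrayInputStream(new byte[128]));
    in.read(new byte[64]);
    long consumed = in.getBytesRead(); // 64; the -1 at EOF is never counted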
diff --git a/src/org/apache/commons/compress/utils/CountingOutputStream.java b/src/org/apache/commons/compress/utils/CountingOutputStream.java
deleted file mode 100644
index ac886bcd442..00000000000
--- a/src/org/apache/commons/compress/utils/CountingOutputStream.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.utils;
-
-import java.io.FilterOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * Stream that tracks the number of bytes written.
- * @since 1.3
- * @NotThreadSafe
- */
-public class CountingOutputStream extends FilterOutputStream {
- private long bytesWritten = 0;
-
- public CountingOutputStream(final OutputStream out) {
- super(out);
- }
-
- @Override
- public void write(final int b) throws IOException {
- out.write(b);
- count(1);
- }
- @Override
- public void write(final byte[] b) throws IOException {
- write(b, 0, b.length);
- }
- @Override
- public void write(final byte[] b, final int off, final int len) throws IOException {
- out.write(b, off, len);
- count(len);
- }
-
- /**
- * Increments the counter of already written bytes.
- * Doesn't increment if written is -1
- *
- * @param written the number of bytes written
- */
- protected void count(final long written) {
- if (written != -1) {
- bytesWritten += written;
- }
- }
-
- /**
- * Returns the current number of bytes written to this stream.
- * @return the number of written bytes
- */
- public long getBytesWritten() {
- return bytesWritten;
- }
-}
diff --git a/src/org/apache/commons/compress/utils/FixedLengthBlockOutputStream.java b/src/org/apache/commons/compress/utils/FixedLengthBlockOutputStream.java
deleted file mode 100644
index d9f2f80f3f5..00000000000
--- a/src/org/apache/commons/compress/utils/FixedLengthBlockOutputStream.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.utils;
-
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.channels.ClosedChannelException;
-import java.nio.channels.WritableByteChannel;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * This class supports writing to an OutputStream or WritableByteChannel in fixed length blocks.
- * <p>It can be used to support output to devices such as tape drives that require output in this
- * format. If the final block does not have enough content to fill an entire block, the output will
- * be padded to a full block size.</p>
- *
- * <p>This class can be used to support TAR, PAX, and CPIO blocked output to character special devices.
- * It is not recommended that this class be used unless writing to such devices, as the padding
- * serves no useful purpose in such cases.</p>
- *
- * <p>This class should normally wrap a FileOutputStream or associated WritableByteChannel directly.
- * If there is an intervening filter that modifies the output, such as a CompressorOutputStream, or
- * performs its own buffering, such as BufferedOutputStream, output to the device may
- * no longer be of the specified size.</p>
- *
- * <p>Any content written to this stream should be self-delimiting and should tolerate any padding
- * added to fill the last block.</p>
- *
- * @since 1.15
- */
-public class FixedLengthBlockOutputStream extends OutputStream implements WritableByteChannel {
-
- private final WritableByteChannel out;
- private final int blockSize;
- private final ByteBuffer buffer;
- private final AtomicBoolean closed = new AtomicBoolean(false);
-
- /**
- * Create a fixed length block output stream with given destination stream and block size
- * @param os The stream to wrap.
- * @param blockSize The block size to use.
- */
- public FixedLengthBlockOutputStream(OutputStream os, int blockSize) {
- if (os instanceof FileOutputStream) {
- FileOutputStream fileOutputStream = (FileOutputStream) os;
- out = fileOutputStream.getChannel();
- buffer = ByteBuffer.allocateDirect(blockSize);
- } else {
- out = new BufferAtATimeOutputChannel(os);
- buffer = ByteBuffer.allocate(blockSize);
- }
- this.blockSize = blockSize;
- }
- /**
- * Create a fixed length block output stream with given destination writable byte channel and block size
- * @param out The writable byte channel to wrap.
- * @param blockSize The block size to use.
- */
- public FixedLengthBlockOutputStream(WritableByteChannel out, int blockSize) {
- this.out = out;
- this.blockSize = blockSize;
- this.buffer = ByteBuffer.allocateDirect(blockSize);
- }
-
- private void maybeFlush() throws IOException {
- if (!buffer.hasRemaining()) {
- writeBlock();
- }
- }
-
- private void writeBlock() throws IOException {
- buffer.flip();
- int i = out.write(buffer);
- boolean hasRemaining = buffer.hasRemaining();
- if (i != blockSize || hasRemaining) {
- String msg = String
- .format("Failed to write %,d bytes atomically. Only wrote %,d",
- blockSize, i);
- throw new IOException(msg);
- }
- buffer.clear();
- }
-
- @Override
- public void write(int b) throws IOException {
- if (!isOpen()) {
- throw new ClosedChannelException();
- }
- buffer.put((byte) b);
- maybeFlush();
- }
-
- @Override
- public void write(byte[] b, final int offset, final int length) throws IOException {
- if (!isOpen()) {
- throw new ClosedChannelException();
- }
- int off = offset;
- int len = length;
- while (len > 0) {
- int n = Math.min(len, buffer.remaining());
- buffer.put(b, off, n);
- maybeFlush();
- len -= n;
- off += n;
- }
- }
-
- @Override
- public int write(ByteBuffer src) throws IOException {
- if (!isOpen()) {
- throw new ClosedChannelException();
- }
- int srcRemaining = src.remaining();
-
- if (srcRemaining < buffer.remaining()) {
- // if we don't have enough bytes in src to fill up a block we must buffer
- buffer.put(src);
- } else {
- int srcLeft = srcRemaining;
- int savedLimit = src.limit();
- // If we're not at the start of buffer, we have some bytes already buffered
- // fill up the rest of the buffer and write the block.
- if (buffer.position() != 0) {
- int n = buffer.remaining();
- src.limit(src.position() + n);
- buffer.put(src);
- writeBlock();
- srcLeft -= n;
- }
- // whilst we have enough bytes in src for complete blocks,
- // write them directly from src without copying them to buffer
- while (srcLeft >= blockSize) {
- src.limit(src.position() + blockSize);
- out.write(src);
- srcLeft -= blockSize;
- }
- // copy any remaining bytes into buffer
- src.limit(savedLimit);
- buffer.put(src);
- }
- return srcRemaining;
- }
-
- @Override
- public boolean isOpen() {
- if (!out.isOpen()) {
- closed.set(true);
- }
- return !closed.get();
- }
-
- /**
- * Potentially pads and then writes the current block to the underlying stream.
- * @throws IOException if writing fails
- */
- public void flushBlock() throws IOException {
- if (buffer.position() != 0) {
- padBlock();
- writeBlock();
- }
- }
-
- @Override
- public void close() throws IOException {
- if (closed.compareAndSet(false, true)) {
- flushBlock();
- out.close();
- }
- }
-
- private void padBlock() {
- buffer.order(ByteOrder.nativeOrder());
- int bytesToWrite = buffer.remaining();
- if (bytesToWrite > 8) {
- int align = buffer.position() & 7;
- if (align != 0) {
- int limit = 8 - align;
- for (int i = 0; i < limit; i++) {
- buffer.put((byte) 0);
- }
- bytesToWrite -= limit;
- }
-
- while (bytesToWrite >= 8) {
- buffer.putLong(0L);
- bytesToWrite -= 8;
- }
- }
- while (buffer.hasRemaining()) {
- buffer.put((byte) 0);
- }
- }
-
- /**
- * Helper class to provide channel wrapper for arbitrary output stream that doesn't alter the
- * size of writes. We can't use Channels.newChannel, because for non FileOutputStreams, it
- * breaks up writes into 8KB max chunks. Since the purpose of this class is to always write
- * complete blocks, we need to write a simple class to take care of it.
- */
- private static class BufferAtATimeOutputChannel implements WritableByteChannel {
-
- private final OutputStream out;
- private final AtomicBoolean closed = new AtomicBoolean(false);
-
- private BufferAtATimeOutputChannel(OutputStream out) {
- this.out = out;
- }
-
- @Override
- public int write(ByteBuffer buffer) throws IOException {
- if (!isOpen()) {
- throw new ClosedChannelException();
- }
- if (!buffer.hasArray()) {
- throw new IllegalArgumentException("direct buffer somehow written to BufferAtATimeOutputChannel");
- }
-
- try {
- int pos = buffer.position();
- int len = buffer.limit() - pos;
- out.write(buffer.array(), buffer.arrayOffset() + pos, len);
- buffer.position(buffer.limit());
- return len;
- } catch (IOException e) {
- try {
- close();
- } catch (IOException ignored) { //NOSONAR
- }
- throw e;
- }
- }
-
- @Override
- public boolean isOpen() {
- return !closed.get();
- }
-
- @Override
- public void close() throws IOException {
- if (closed.compareAndSet(false, true)) {
- out.close();
- }
- }
-
- }
-
-
-}
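The classic consumer is blocked tar output. A sketch (the device path and record size are illustrative only; TarArchiveOutputStream is the commons-compress tar writer; assumes an enclosing method that declares IOException):

    import java.io.FileOutputStream;
    import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
    import org.apache.commons.compress.utils.FixedLengthBlockOutputStream;

    try (FixedLengthBlockOutputStream blocked = new FixedLengthBlockOutputStream(
                new FileOutputStream("/dev/nst0"), 10240);  // 10 KiB tape records
         TarArchiveOutputStream tar = new TarArchiveOutputStream(blocked)) {
        // write entries here; close() zero-pads the final partial block
    }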
diff --git a/src/org/apache/commons/compress/utils/FlushShieldFilterOutputStream.java b/src/org/apache/commons/compress/utils/FlushShieldFilterOutputStream.java
deleted file mode 100644
index 239e8237406..00000000000
--- a/src/org/apache/commons/compress/utils/FlushShieldFilterOutputStream.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.utils;
-
-import java.io.FilterOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * Re-implements {@link FilterOutputStream#flush()} to do nothing.
- */
-public class FlushShieldFilterOutputStream extends FilterOutputStream {
-
- public FlushShieldFilterOutputStream(OutputStream out) {
- super(out);
- }
-
- @Override
- public void flush() throws IOException {
- // NO IMPLEMENTATION.
- }
-
-}
diff --git a/src/org/apache/commons/compress/utils/IOUtils.java b/src/org/apache/commons/compress/utils/IOUtils.java
deleted file mode 100644
index 50577c90dd5..00000000000
--- a/src/org/apache/commons/compress/utils/IOUtils.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.utils;
-
-import java.io.ByteArrayOutputStream;
-import java.io.Closeable;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.channels.ReadableByteChannel;
-
-/**
- * Utility functions
- * @Immutable (has mutable data but it is write-only)
- */
-public final class IOUtils {
-
- private static final int COPY_BUF_SIZE = 8024;
- private static final int SKIP_BUF_SIZE = 4096;
-
- // This buffer does not need to be synchronised because it is write only; the contents are ignored
- // Does not affect Immutability
- private static final byte[] SKIP_BUF = new byte[SKIP_BUF_SIZE];
-
- /** Private constructor to prevent instantiation of this utility class. */
- private IOUtils(){
- }
-
- /**
- * Copies the content of a InputStream into an OutputStream.
- * Uses a default buffer size of 8024 bytes.
- *
- * @param input
- * the InputStream to copy
- * @param output
- * the target Stream
- * @return the number of bytes copied
- * @throws IOException
- * if an error occurs
- */
- public static long copy(final InputStream input, final OutputStream output) throws IOException {
- return copy(input, output, COPY_BUF_SIZE);
- }
-
- /**
- * Copies the content of a InputStream into an OutputStream
- *
- * @param input
- * the InputStream to copy
- * @param output
- * the target Stream
- * @param buffersize
- * the buffer size to use, must be bigger than 0
- * @return the number of bytes copied
- * @throws IOException
- * if an error occurs
- * @throws IllegalArgumentException
- * if buffersize is smaller than or equal to 0
- */
- public static long copy(final InputStream input, final OutputStream output, final int buffersize) throws IOException {
- if (buffersize < 1) {
- throw new IllegalArgumentException("buffersize must be bigger than 0");
- }
- final byte[] buffer = new byte[buffersize];
- int n = 0;
- long count=0;
- while (-1 != (n = input.read(buffer))) {
- output.write(buffer, 0, n);
- count += n;
- }
- return count;
- }
-
- /**
- * Skips the given number of bytes by repeatedly invoking skip on
- * the given input stream if necessary.
- *
- * <p>In a case where the stream's skip() method returns 0 before
- * the requested number of bytes has been skipped, this implementation
- * will fall back to using the read() method.</p>
- *
- * <p>This method will only skip less than the requested number of
- * bytes if the end of the input stream has been reached.</p>
- *
- * @param input stream to skip bytes in
- * @param numToSkip the number of bytes to skip
- * @return the number of bytes actually skipped
- * @throws IOException on error
- */
- public static long skip(final InputStream input, long numToSkip) throws IOException {
- final long available = numToSkip;
- while (numToSkip > 0) {
- final long skipped = input.skip(numToSkip);
- if (skipped == 0) {
- break;
- }
- numToSkip -= skipped;
- }
-
- while (numToSkip > 0) {
- final int read = readFully(input, SKIP_BUF, 0,
- (int) Math.min(numToSkip, SKIP_BUF_SIZE));
- if (read < 1) {
- break;
- }
- numToSkip -= read;
- }
- return available - numToSkip;
- }
-
- /**
- * Reads as much from input as possible to fill the given array.
- *
- * <p>This method may invoke read repeatedly to fill the array and
- * only read fewer bytes than the length of the array if the end of
- * the stream has been reached.</p>
- *
- * @param input stream to read from
- * @param b buffer to fill
- * @return the number of bytes actually read
- * @throws IOException on error
- */
- public static int readFully(final InputStream input, final byte[] b) throws IOException {
- return readFully(input, b, 0, b.length);
- }
-
- /**
- * Reads as much from input as possible to fill the given array
- * with the given amount of bytes.
- *
- * <p>This method may invoke read repeatedly to read the bytes and
- * only read fewer bytes than the requested length if the end of
- * the stream has been reached.</p>
- *
- * @param input stream to read from
- * @param b buffer to fill
- * @param offset offset into the buffer to start filling at
- * @param len the number of bytes to read
- * @return the number of bytes actually read
- * @throws IOException
- * if an I/O error has occurred
- */
- public static int readFully(final InputStream input, final byte[] b, final int offset, final int len)
- throws IOException {
- if (len < 0 || offset < 0 || len + offset > b.length) {
- throw new IndexOutOfBoundsException();
- }
- int count = 0, x = 0;
- while (count != len) {
- x = input.read(b, offset + count, len - count);
- if (x == -1) {
- break;
- }
- count += x;
- }
- return count;
- }
-
- /**
- * Reads {@code b.remaining()} bytes from the given channel
- * starting at the current channel's position.
- *
- * <p>This method reads repeatedly from the channel until the
- * requested number of bytes are read. This method blocks until
- * the requested number of bytes are read, the end of the channel
- * is detected, or an exception is thrown.</p>
- *
- * @param channel the channel to read from
- * @param b the buffer into which the data is read.
- * @throws IOException - if an I/O error occurs.
- * @throws EOFException - if the channel reaches the end before reading all the bytes.
- */
- public static void readFully(ReadableByteChannel channel, ByteBuffer b) throws IOException {
- final int expectedLength = b.remaining();
- int read = 0;
- while (read < expectedLength) {
- int readNow = channel.read(b);
- if (readNow <= 0) {
- break;
- }
- read += readNow;
- }
- if (read < expectedLength) {
- throw new EOFException();
- }
- }
-
- // toByteArray(InputStream) copied from:
- // commons/proper/io/trunk/src/main/java/org/apache/commons/io/IOUtils.java?revision=1428941
- // January 8th, 2013
- //
- // Assuming our copy() works just as well as theirs! :-)
-
- /**
- * Gets the contents of an InputStream as a byte[].
- *
- * This method buffers the input internally, so there is no need to use a
- * BufferedInputStream.
- *
- * @param input the InputStream to read from
- * @return the requested byte array
- * @throws NullPointerException if the input is null
- * @throws IOException if an I/O error occurs
- * @since 1.5
- */
- public static byte[] toByteArray(final InputStream input) throws IOException {
- final ByteArrayOutputStream output = new ByteArrayOutputStream();
- copy(input, output);
- return output.toByteArray();
- }
-
- /**
- * Closes the given Closeable and swallows any IOException that may occur.
- * @param c Closeable to close, can be null
- * @since 1.7
- */
- public static void closeQuietly(final Closeable c) {
- if (c != null) {
- try {
- c.close();
- } catch (final IOException ignored) { // NOPMD
- }
- }
- }
-}
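
For reference, the two loops in the removed skip() exist because InputStream.skip() may legitimately skip fewer bytes than requested, so a read() fallback is needed. A minimal, self-contained sketch of the same pattern against plain java.io (the method name and buffer size here are illustrative, not taken from the removed class):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class SkipFullyDemo {

    // Skips up to numToSkip bytes; when skip() stalls, falls back to read(),
    // mirroring the two-loop structure of the removed IOUtils.skip().
    static long skipFully(final InputStream in, final long numToSkip) throws IOException {
        final byte[] buf = new byte[4096];
        long remaining = numToSkip;
        while (remaining > 0) {
            final long skipped = in.skip(remaining);
            if (skipped > 0) {
                remaining -= skipped;
            } else {
                final int read = in.read(buf, 0, (int) Math.min(remaining, buf.length));
                if (read < 1) {
                    break; // end of stream reached
                }
                remaining -= read;
            }
        }
        return numToSkip - remaining;
    }

    public static void main(String[] args) throws IOException {
        final InputStream in = new ByteArrayInputStream(new byte[10]);
        System.out.println(skipFully(in, 4));  // 4
        System.out.println(skipFully(in, 20)); // 6, only 6 bytes were left
    }
}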
diff --git a/src/org/apache/commons/compress/utils/InputStreamStatistics.java b/src/org/apache/commons/compress/utils/InputStreamStatistics.java
deleted file mode 100644
index 569ab3687b7..00000000000
--- a/src/org/apache/commons/compress/utils/InputStreamStatistics.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.utils;
-
-/**
- * This interface provides statistics on the current decompression stream.
- * The stream consumer can use these statistics to detect abnormal
- * compression ratios, e.g. to prevent zip bombs.
- *
- * @since 1.17
- */
-public interface InputStreamStatistics {
- /**
- * @return the number of raw or compressed bytes read by the stream
- */
- long getCompressedCount();
-
- /**
- * @return the number of decompressed bytes returned by the stream
- */
- long getUncompressedCount();
-}
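
A sketch of the intended consumer-side use, a zip-bomb guard; the 100:1 threshold and the guard class are assumptions for illustration, and the interface itself is now supplied by the external commons-compress artifact rather than this vendored source:

import org.apache.commons.compress.utils.InputStreamStatistics;

public final class RatioGuard {

    private static final long MAX_RATIO = 100; // illustrative threshold, not from the removed code

    // Throws if the decompressed output is suspiciously large relative to the input read so far.
    public static void check(final InputStreamStatistics stats) {
        final long compressed = stats.getCompressedCount();
        final long uncompressed = stats.getUncompressedCount();
        if (compressed > 0 && uncompressed / compressed > MAX_RATIO) {
            throw new IllegalStateException(
                    "Suspicious compression ratio: " + uncompressed + '/' + compressed);
        }
    }

    private RatioGuard() {
        // utility class
    }
}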
diff --git a/src/org/apache/commons/compress/utils/Iterators.java b/src/org/apache/commons/compress/utils/Iterators.java
deleted file mode 100644
index 0db0c36017f..00000000000
--- a/src/org/apache/commons/compress/utils/Iterators.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.utils;
-
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Objects;
-
-/**
- * Iterator utilities.
- *
- * @since 1.13
- */
-public class Iterators {
-
- /**
- * Adds all the elements in the source {@code iterator} to the target
- * {@code collection}.
- *
- * <p>
- * When this method returns, the {@code iterator} will be "empty": its
- * {@code hasNext()} method returns {@code false}.
- * </p>
- *
- * @param <T> type of the elements contained inside the collection
- * @param collection target collection
- * @param iterator source
- * @return {@code true} if the target {@code collection} was modified as a
- * result of this operation
- */
- public static <T> boolean addAll(final Collection<T> collection, final Iterator<? extends T> iterator) {
- Objects.requireNonNull(collection);
- Objects.requireNonNull(iterator);
- boolean wasModified = false;
- while (iterator.hasNext()) {
- wasModified |= collection.add(iterator.next());
- }
- return wasModified;
- }
-
- private Iterators() {
- // do not instantiate
- }
-
-}
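
Usage is straightforward; this sketch assumes the class is still on the classpath (after this change it comes from the external commons-compress jar rather than these vendored sources):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.compress.utils.Iterators;

public class IteratorsDemo {
    public static void main(String[] args) {
        final Iterator<String> source = Arrays.asList("a", "b").iterator();
        final List<String> target = new ArrayList<>();
        final boolean modified = Iterators.addAll(target, source);
        System.out.println(modified);         // true
        System.out.println(target);           // [a, b]
        System.out.println(source.hasNext()); // false, the iterator is drained
    }
}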
diff --git a/src/org/apache/commons/compress/utils/Lists.java b/src/org/apache/commons/compress/utils/Lists.java
deleted file mode 100644
index e7a82dc6a75..00000000000
--- a/src/org/apache/commons/compress/utils/Lists.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.utils;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-
-/**
- * List utilities
- *
- * @since 1.13
- */
-public class Lists {
-
- /**
- * Creates a new {@link ArrayList}.
- *
- * @param <E> type of elements contained in new list
- * @return a new {@link ArrayList}
- */
- public static <E> ArrayList<E> newArrayList() {
- return new ArrayList<>();
- }
-
- /**
- * Creates a new {@link ArrayList} filled with the contents of the given
- * {@code iterator}.
- *
- * @param iterator
- * the source iterator
- * @param <E> type of elements contained in new list
- * @return a new {@link ArrayList}
- */
- public static <E> ArrayList<E> newArrayList(final Iterator<? extends E> iterator) {
- final ArrayList<E> list = newArrayList();
- Iterators.addAll(list, iterator);
- return list;
- }
-
- private Lists() {
- // do not instantiate
- }
-
-}
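
An equivalent one-liner under the same classpath assumption:

import java.util.ArrayList;
import java.util.Arrays;
import org.apache.commons.compress.utils.Lists;

public class ListsDemo {
    public static void main(String[] args) {
        final ArrayList<Integer> copy = Lists.newArrayList(Arrays.asList(1, 2, 3).iterator());
        System.out.println(copy); // [1, 2, 3]
    }
}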
diff --git a/src/org/apache/commons/compress/utils/NoCloseInputStream.java b/src/org/apache/commons/compress/utils/NoCloseInputStream.java
deleted file mode 100644
index bdc0ee9b9ff..00000000000
--- a/src/org/apache/commons/compress/utils/NoCloseInputStream.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.commons.compress.utils;
-
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * Wrapper that overrides {@link #close} so that it doesn't close the
- * underlying stream.
- *
- * @since 1.17
- */
-public class NoCloseInputStream extends FilterInputStream {
-
- public NoCloseInputStream(InputStream in) {
- super(in);
- }
-
- /**
- * This method does nothing.
- */
- public void close() {
- // do not close the stream
- }
-}
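
The wrapper matters when a caller (for example, try-with-resources or a parsing library) closes its stream while the underlying one must stay open. A minimal sketch, assuming the class is resolved from the external jar:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.compress.utils.NoCloseInputStream;

public class NoCloseDemo {
    public static void main(String[] args) throws IOException {
        final InputStream shared = new ByteArrayInputStream(new byte[] {1, 2, 3});
        try (InputStream wrapped = new NoCloseInputStream(shared)) {
            System.out.println(wrapped.read()); // 1
        } // wrapped.close() is a no-op, so shared is still readable
        System.out.println(shared.read()); // 2
    }
}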
diff --git a/src/org/apache/commons/compress/utils/SeekableInMemoryByteChannel.java b/src/org/apache/commons/compress/utils/SeekableInMemoryByteChannel.java
deleted file mode 100644
index eece7f5bb6d..00000000000
--- a/src/org/apache/commons/compress/utils/SeekableInMemoryByteChannel.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.commons.compress.utils;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.ClosedChannelException;
-import java.nio.channels.SeekableByteChannel;
-import java.util.Arrays;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * A {@link SeekableByteChannel} implementation that wraps a byte[].
- *
- * <p>When this channel is used for writing, an internal buffer grows to accommodate
- * incoming data. A natural size limit is the value of {@link Integer#MAX_VALUE}.
- * The internal buffer can be accessed via {@link SeekableInMemoryByteChannel#array()}.
- *
- * @since 1.13
- * @NotThreadSafe
- */
-public class SeekableInMemoryByteChannel implements SeekableByteChannel {
-
- private static final int NAIVE_RESIZE_LIMIT = Integer.MAX_VALUE >> 1;
-
- private byte[] data;
- private final AtomicBoolean closed = new AtomicBoolean();
- private int position, size;
-
- /**
- * Constructor taking a byte array.
- *
- * <p>This constructor is intended to be used with a pre-allocated buffer or when
- * reading from a given byte array.
- *
- * @param data input data or pre-allocated array.
- */
- public SeekableInMemoryByteChannel(byte[] data) {
- this.data = data;
- size = data.length;
- }
-
- /**
- * Parameterless constructor; allocates the internal buffer itself.
- */
- public SeekableInMemoryByteChannel() {
- this(new byte[0]);
- }
-
- /**
- * Constructor taking a size of storage to be allocated.
- *
- * <p>Creates a channel and allocates internal storage of a given size.
- *
- * @param size size of internal buffer to allocate, in bytes.
- */
- public SeekableInMemoryByteChannel(int size) {
- this(new byte[size]);
- }
-
- @Override
- public long position() {
- return position;
- }
-
- @Override
- public SeekableByteChannel position(long newPosition) throws IOException {
- ensureOpen();
- if (newPosition < 0L || newPosition > Integer.MAX_VALUE) {
- throw new IllegalArgumentException("Position has to be in range 0.. " + Integer.MAX_VALUE);
- }
- position = (int) newPosition;
- return this;
- }
-
- @Override
- public long size() {
- return size;
- }
-
- @Override
- public SeekableByteChannel truncate(long newSize) {
- if (size > newSize) {
- size = (int) newSize;
- }
- repositionIfNecessary();
- return this;
- }
-
- @Override
- public int read(ByteBuffer buf) throws IOException {
- ensureOpen();
- repositionIfNecessary();
- int wanted = buf.remaining();
- int possible = size - position;
- if (possible <= 0) {
- return -1;
- }
- if (wanted > possible) {
- wanted = possible;
- }
- buf.put(data, position, wanted);
- position += wanted;
- return wanted;
- }
-
- @Override
- public void close() {
- closed.set(true);
- }
-
- @Override
- public boolean isOpen() {
- return !closed.get();
- }
-
- @Override
- public int write(ByteBuffer b) throws IOException {
- ensureOpen();
- int wanted = b.remaining();
- int possibleWithoutResize = size - position;
- if (wanted > possibleWithoutResize) {
- int newSize = position + wanted;
- if (newSize < 0) { // overflow
- resize(Integer.MAX_VALUE);
- wanted = Integer.MAX_VALUE - position;
- } else {
- resize(newSize);
- }
- }
- b.get(data, position, wanted);
- position += wanted;
- if (size < position) {
- size = position;
- }
- return wanted;
- }
-
- /**
- * Obtains the array backing this channel.
- *
- * <p>NOTE:
- * The returned array may be larger than the data it contains; use
- * {@link #size()} to obtain the size of the data stored in the buffer.
- *
- * @return internal byte array.
- */
- public byte[] array() {
- return data;
- }
-
- private void resize(int newLength) {
- int len = data.length;
- if (len <= 0) {
- len = 1;
- }
- if (newLength < NAIVE_RESIZE_LIMIT) {
- while (len < newLength) {
- len <<= 1;
- }
- } else { // avoid overflow
- len = newLength;
- }
- data = Arrays.copyOf(data, len);
- }
-
- private void ensureOpen() throws ClosedChannelException {
- if (!isOpen()) {
- throw new ClosedChannelException();
- }
- }
-
- private void repositionIfNecessary() {
- if (position > size) {
- position = size;
- }
- }
-
-}
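
A round-trip sketch; note that array() returns the whole backing buffer, which may be longer than size() because resize() doubles capacity on growth:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.commons.compress.utils.SeekableInMemoryByteChannel;

public class ChannelDemo {
    public static void main(String[] args) throws IOException {
        try (SeekableInMemoryByteChannel channel = new SeekableInMemoryByteChannel()) {
            channel.write(ByteBuffer.wrap("hello".getBytes(StandardCharsets.US_ASCII)));
            channel.position(0); // rewind before reading back
            final ByteBuffer buf = ByteBuffer.allocate((int) channel.size());
            channel.read(buf);
            System.out.println(new String(buf.array(), StandardCharsets.US_ASCII)); // hello
            // channel.array().length may exceed channel.size() after growth
        }
    }
}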
diff --git a/src/org/apache/commons/compress/utils/ServiceLoaderIterator.java b/src/org/apache/commons/compress/utils/ServiceLoaderIterator.java
deleted file mode 100644
index aeda85721cd..00000000000
--- a/src/org/apache/commons/compress/utils/ServiceLoaderIterator.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.utils;
-
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-import java.util.ServiceConfigurationError;
-import java.util.ServiceLoader;
-
-/**
- * Iterates all services for a given class through the standard
- * {@link ServiceLoader} mechanism.
- *
- * @param <E>
- * The service to load
- * @since 1.13
- */
-public class ServiceLoaderIterator<E> implements Iterator<E> {
-
- private E nextServiceLoader;
- private final Class<E> service;
- private final Iterator<E> serviceLoaderIterator;
-
- public ServiceLoaderIterator(final Class<E> service) {
- this(service, ClassLoader.getSystemClassLoader());
- }
-
- public ServiceLoaderIterator(final Class<E> service, final ClassLoader classLoader) {
- this.service = service;
- final ServiceLoader<E> serviceLoader = ServiceLoader.load(service, classLoader);
- serviceLoaderIterator = serviceLoader.iterator();
- nextServiceLoader = null;
- }
-
- private boolean getNextServiceLoader() {
- while (nextServiceLoader == null) {
- try {
- if (!serviceLoaderIterator.hasNext()) {
- return false;
- }
- nextServiceLoader = serviceLoaderIterator.next();
- } catch (final ServiceConfigurationError e) {
- if (e.getCause() instanceof SecurityException) {
- // Ignore security exceptions
- // TODO Log?
- continue;
- }
- throw e;
- }
- }
- return true;
- }
-
- @Override
- public boolean hasNext() {
- return getNextServiceLoader();
- }
-
- @Override
- public E next() {
- if (!getNextServiceLoader()) {
- throw new NoSuchElementException("No more elements for service " + service.getName());
- }
- final E tempNext = nextServiceLoader;
- nextServiceLoader = null;
- return tempNext;
- }
-
- @Override
- public void remove() {
- throw new UnsupportedOperationException("service=" + service.getName());
- }
-
-}
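
A sketch of iterating providers; the Greeter interface is hypothetical and stands in for a real service interface. With no providers registered the loop simply does nothing:

import java.util.Iterator;
import org.apache.commons.compress.utils.ServiceLoaderIterator;

public class ServiceLoaderDemo {

    // Hypothetical service interface, used only for this illustration.
    public interface Greeter {
        String greet();
    }

    public static void main(String[] args) {
        final Iterator<Greeter> providers = new ServiceLoaderIterator<>(Greeter.class);
        while (providers.hasNext()) { // providers failing with a SecurityException are skipped
            System.out.println(providers.next().greet());
        }
    }
}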
diff --git a/src/org/apache/commons/compress/utils/Sets.java b/src/org/apache/commons/compress/utils/Sets.java
deleted file mode 100644
index 29981208dfd..00000000000
--- a/src/org/apache/commons/compress/utils/Sets.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.commons.compress.utils;
-
-import java.util.Collections;
-import java.util.HashSet;
-
-/**
- * Set utilities
- *
- * @since 1.13
- */
-public class Sets {
-
- private Sets() {
- // Do not instantiate
- }
-
- /**
- * Creates a new HashSet filled with the given elements
- *
- * @param elements
- * the elements to fill the new set
- * @param <E>