Upgrade rust/crates/zip to 0.5.13 am: 4558110cf1 am: fb023151a8 am: 77bd9cf5d2 am: 79eb7d1db3

Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/zip/+/1742524

Change-Id: Id2b08d5364ae8783f85386b1413204ecc83e3dfa
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 935452f..b1502c1 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,5 @@
 {
   "git": {
-    "sha1": "88e6f87884a906ee63962d3a2cf87f890878a46d"
+    "sha1": "7edf2489d5cff8b80f02ee6fc5febf3efd0a9442"
   }
 }
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 1e7b395..8374189 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -16,7 +16,7 @@
     strategy:
       matrix:
         os: [ubuntu-latest, macOS-latest, windows-latest]
-        rust: [stable, 1.34.0]
+        rust: [stable, 1.36.0]
 
     steps:
     - uses: actions/checkout@master
diff --git a/Android.bp b/Android.bp
index e5b607d..3bd082d 100644
--- a/Android.bp
+++ b/Android.bp
@@ -38,17 +38,16 @@
 
 // dependent_library ["feature_list"]
 //   byteorder-1.4.3 "default,std"
-//   cc-1.0.67
-//   cfg-if-0.1.10
+//   cc-1.0.68
 //   cfg-if-1.0.0
 //   crc32fast-1.2.1 "default,std"
-//   flate2-1.0.14 "any_zlib,libz-sys,zlib"
-//   libc-0.2.94 "default,std"
-//   libz-sys-1.1.3 "default,libc,stock-zlib"
+//   flate2-1.0.20 "any_zlib,libz-sys,zlib"
+//   libc-0.2.97 "default,std"
+//   libz-sys-1.1.3
 //   pkg-config-0.3.19
-//   proc-macro2-1.0.26 "default,proc-macro"
+//   proc-macro2-1.0.27 "default,proc-macro"
 //   quote-1.0.9 "default,proc-macro"
-//   syn-1.0.72 "clone-impls,default,derive,parsing,printing,proc-macro,quote"
-//   thiserror-1.0.24
-//   thiserror-impl-1.0.24
+//   syn-1.0.73 "clone-impls,default,derive,parsing,printing,proc-macro,quote"
+//   thiserror-1.0.25
+//   thiserror-impl-1.0.25
 //   unicode-xid-0.2.2 "default"
diff --git a/Cargo.toml b/Cargo.toml
index 06baad2..e964f60 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,7 @@
 [package]
 edition = "2018"
 name = "zip"
-version = "0.5.12"
+version = "0.5.13"
 authors = ["Mathijs van de Nes <[email protected]>", "Marli Frost <[email protected]>", "Ryan Levick <[email protected]>"]
 description = "Library to support the reading and writing of zip files.\n"
 keywords = ["zip", "archive"]
@@ -27,7 +27,7 @@
 version = "1.3"
 
 [dependencies.bzip2]
-version = "0.3"
+version = "0.4"
 optional = true
 
 [dependencies.crc32fast]
@@ -58,3 +58,4 @@
 deflate = ["flate2/rust_backend"]
 deflate-miniz = ["flate2/default"]
 deflate-zlib = ["flate2/zlib"]
+unreserved = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index b7e0fb8..2ce7e60 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
 [package]
 name = "zip"
-version = "0.5.12"
+version = "0.5.13"
 authors = ["Mathijs van de Nes <[email protected]>", "Marli Frost <[email protected]>", "Ryan Levick <[email protected]>"]
 license = "MIT"
 repository = "https://github.com/zip-rs/zip.git"
@@ -14,7 +14,7 @@
 flate2 = { version = "1.0.0", default-features = false, optional = true }
 time = { version = "0.1", optional = true }
 byteorder = "1.3"
-bzip2 = { version = "0.3", optional = true }
+bzip2 = { version = "0.4", optional = true }
 crc32fast = "1.0"
 thiserror = "1.0"
 
@@ -27,6 +27,7 @@
 deflate = ["flate2/rust_backend"]
 deflate-miniz = ["flate2/default"]
 deflate-zlib = ["flate2/zlib"]
+unreserved = []
 default = ["bzip2", "deflate", "time"]
 
 [[bench]]
diff --git a/METADATA b/METADATA
index 4fc4c19..33abb6b 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@
   }
   url {
     type: ARCHIVE
-    value: "https://static.crates.io/crates/zip/zip-0.5.12.crate"
+    value: "https://static.crates.io/crates/zip/zip-0.5.13.crate"
   }
-  version: "0.5.12"
+  version: "0.5.13"
   license_type: NOTICE
   last_upgrade_date {
     year: 2021
-    month: 5
-    day: 19
+    month: 6
+    day: 21
   }
 }
diff --git a/README.md b/README.md
index f6a28cc..e489b98 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 [![Build Status](https://img.shields.io/github/workflow/status/zip-rs/zip/CI)](https://github.com/zip-rs/zip/actions?query=branch%3Amaster+workflow%3ACI)
 [![Crates.io version](https://img.shields.io/crates/v/zip.svg)](https://crates.io/crates/zip)
 
-[Documentation](https://docs.rs/zip/0.5.10/zip/)
+[Documentation](https://docs.rs/zip/0.5.13/zip/)
 
 
 Info
diff --git a/src/read.rs b/src/read.rs
index 3aac00f..97bccd2 100644
--- a/src/read.rs
+++ b/src/read.rs
@@ -4,8 +4,7 @@
 use crate::crc32::Crc32Reader;
 use crate::result::{InvalidPassword, ZipError, ZipResult};
 use crate::spec;
-use crate::zipcrypto::ZipCryptoReader;
-use crate::zipcrypto::ZipCryptoReaderValid;
+use crate::zipcrypto::{ZipCryptoReader, ZipCryptoReaderValid, ZipCryptoValidator};
 use std::borrow::Cow;
 use std::collections::HashMap;
 use std::io::{self, prelude::*};
@@ -47,7 +46,7 @@
 /// }
 /// ```
 #[derive(Clone, Debug)]
-pub struct ZipArchive<R: Read + io::Seek> {
+pub struct ZipArchive<R> {
     reader: R,
     files: Vec<ZipFileData>,
     names_map: HashMap<String, usize>,
@@ -161,6 +160,8 @@
 fn make_crypto_reader<'a>(
     compression_method: crate::compression::CompressionMethod,
     crc32: u32,
+    last_modified_time: DateTime,
+    using_data_descriptor: bool,
     reader: io::Take<&'a mut dyn io::Read>,
     password: Option<&[u8]>,
 ) -> ZipResult<Result<CryptoReader<'a>, InvalidPassword>> {
@@ -173,10 +174,17 @@
 
     let reader = match password {
         None => CryptoReader::Plaintext(reader),
-        Some(password) => match ZipCryptoReader::new(reader, password).validate(crc32)? {
-            None => return Ok(Err(InvalidPassword)),
-            Some(r) => CryptoReader::ZipCrypto(r),
-        },
+        Some(password) => {
+            let validator = if using_data_descriptor {
+                ZipCryptoValidator::InfoZipMsdosTime(last_modified_time.timepart())
+            } else {
+                ZipCryptoValidator::PkzipCrc32(crc32)
+            };
+            match ZipCryptoReader::new(reader, password).validate(validator)? {
+                None => return Ok(Err(InvalidPassword)),
+                Some(r) => CryptoReader::ZipCrypto(r),
+            }
+        }
     };
     Ok(Ok(reader))
 }
@@ -209,7 +217,7 @@
 impl<R: Read + io::Seek> ZipArchive<R> {
     /// Get the directory start offset and number of files. This is done in a
     /// separate function to ease the control flow design.
-    fn get_directory_counts(
+    pub(crate) fn get_directory_counts(
         reader: &mut R,
         footer: &spec::CentralDirectoryEnd,
         cde_start_pos: u64,
@@ -481,17 +489,20 @@
         let data = &mut self.files[file_number];
 
         match (password, data.encrypted) {
-            (None, true) => {
-                return Err(ZipError::UnsupportedArchive(
-                    "Password required to decrypt file",
-                ))
-            }
+            (None, true) => return Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)),
             (Some(_), false) => password = None, //Password supplied, but none needed! Discard.
             _ => {}
         }
         let limit_reader = find_content(data, &mut self.reader)?;
 
-        match make_crypto_reader(data.compression_method, data.crc32, limit_reader, password) {
+        match make_crypto_reader(
+            data.compression_method,
+            data.crc32,
+            data.last_modified_time,
+            data.using_data_descriptor,
+            limit_reader,
+            password,
+        ) {
             Ok(Ok(crypto_reader)) => Ok(Ok(ZipFile {
                 crypto_reader: Some(crypto_reader),
                 reader: ZipFileReader::NoReader,
@@ -514,7 +525,8 @@
     Err(ZipError::UnsupportedArchive(detail))
 }
 
-fn central_header_to_zip_file<R: Read + io::Seek>(
+/// Parse a central directory entry to collect the information for the file.
+pub(crate) fn central_header_to_zip_file<R: Read + io::Seek>(
     reader: &mut R,
     archive_offset: u64,
 ) -> ZipResult<ZipFileData> {
@@ -530,6 +542,7 @@
     let flags = reader.read_u16::<LittleEndian>()?;
     let encrypted = flags & 1 == 1;
     let is_utf8 = flags & (1 << 11) != 0;
+    let using_data_descriptor = flags & (1 << 3) != 0;
     let compression_method = reader.read_u16::<LittleEndian>()?;
     let last_mod_time = reader.read_u16::<LittleEndian>()?;
     let last_mod_date = reader.read_u16::<LittleEndian>()?;
@@ -564,6 +577,7 @@
         system: System::from_u8((version_made_by >> 8) as u8),
         version_made_by: version_made_by as u8,
         encrypted,
+        using_data_descriptor,
         compression_method: {
             #[allow(deprecated)]
             CompressionMethod::from_u16(compression_method)
@@ -574,14 +588,16 @@
         uncompressed_size: uncompressed_size as u64,
         file_name,
         file_name_raw,
+        extra_field,
         file_comment,
         header_start: offset,
         central_header_start,
         data_start: 0,
         external_attributes: external_file_attributes,
+        large_file: false,
     };
 
-    match parse_extra_field(&mut result, &*extra_field) {
+    match parse_extra_field(&mut result) {
         Ok(..) | Err(ZipError::Io(..)) => {}
         Err(e) => return Err(e),
     }
@@ -592,20 +608,22 @@
     Ok(result)
 }
 
-fn parse_extra_field(file: &mut ZipFileData, data: &[u8]) -> ZipResult<()> {
-    let mut reader = io::Cursor::new(data);
+fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> {
+    let mut reader = io::Cursor::new(&file.extra_field);
 
-    while (reader.position() as usize) < data.len() {
+    while (reader.position() as usize) < file.extra_field.len() {
         let kind = reader.read_u16::<LittleEndian>()?;
         let len = reader.read_u16::<LittleEndian>()?;
         let mut len_left = len as i64;
         // Zip64 extended information extra field
         if kind == 0x0001 {
             if file.uncompressed_size == 0xFFFFFFFF {
+                file.large_file = true;
                 file.uncompressed_size = reader.read_u64::<LittleEndian>()?;
                 len_left -= 8;
             }
             if file.compressed_size == 0xFFFFFFFF {
+                file.large_file = true;
                 file.compressed_size = reader.read_u64::<LittleEndian>()?;
                 len_left -= 8;
             }
@@ -797,6 +815,11 @@
         self.data.crc32
     }
 
+    /// Get the extra data of the zip header for this file
+    pub fn extra_data(&self) -> &[u8] {
+        &self.data.extra_field
+    }
+
     /// Get the starting offset of the data of the compressed file
     pub fn data_start(&self) -> u64 {
         self.data.data_start
@@ -907,6 +930,7 @@
         system: System::from_u8((version_made_by >> 8) as u8),
         version_made_by: version_made_by as u8,
         encrypted,
+        using_data_descriptor,
         compression_method,
         last_modified_time: DateTime::from_msdos(last_mod_date, last_mod_time),
         crc32,
@@ -914,6 +938,7 @@
         uncompressed_size: uncompressed_size as u64,
         file_name,
         file_name_raw,
+        extra_field,
         file_comment: String::new(), // file comment is only available in the central directory
         // header_start and data start are not available, but also don't matter, since seeking is
         // not available.
@@ -924,9 +949,10 @@
         // We set this to zero, which should be valid as the docs state 'If input came
         // from standard input, this field is set to zero.'
         external_attributes: 0,
+        large_file: false,
     };
 
-    match parse_extra_field(&mut result, &extra_field) {
+    match parse_extra_field(&mut result) {
         Ok(..) | Err(ZipError::Io(..)) => {}
         Err(e) => return Err(e),
     }
@@ -942,8 +968,15 @@
 
     let result_crc32 = result.crc32;
     let result_compression_method = result.compression_method;
-    let crypto_reader =
-        make_crypto_reader(result_compression_method, result_crc32, limit_reader, None)?.unwrap();
+    let crypto_reader = make_crypto_reader(
+        result_compression_method,
+        result_crc32,
+        result.last_modified_time,
+        result.using_data_descriptor,
+        limit_reader,
+        None,
+    )?
+    .unwrap();
 
     Ok(Some(ZipFile {
         data: Cow::Owned(result),
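
(Not part of the patch.) A minimal sketch of how the new `ZipFile::extra_data` accessor introduced
above might be used by a consumer; the archive path is hypothetical:

```rust
use std::fs::File;
use zip::ZipArchive;

fn main() -> zip::result::ZipResult<()> {
    // Hypothetical archive path, purely for illustration.
    let file = File::open("example.zip")?;
    let mut archive = ZipArchive::new(file)?;

    for i in 0..archive.len() {
        let entry = archive.by_index(i)?;
        // extra_data() (added in this change) exposes the raw extra-field bytes
        // recorded in the central directory for this entry.
        println!("{}: {} extra-field bytes", entry.name(), entry.extra_data().len());
    }
    Ok(())
}
```
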
diff --git a/src/result.rs b/src/result.rs
index e8b7d05..5d5ab45 100644
--- a/src/result.rs
+++ b/src/result.rs
@@ -32,6 +32,21 @@
     FileNotFound,
 }
 
+impl ZipError {
+    /// The text used as an error when a password is required and not supplied
+    ///
+    /// ```rust,no_run
+    /// # use zip::result::ZipError;
+    /// # let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&[])).unwrap();
+    /// match archive.by_index(1) {
+    ///     Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)) => eprintln!("a password is needed to unzip this file"),
+    ///     _ => (),
+    /// }
+    /// # ()
+    /// ```
+    pub const PASSWORD_REQUIRED: &'static str = "Password required to decrypt file";
+}
+
 impl From<ZipError> for io::Error {
     fn from(err: ZipError) -> io::Error {
         io::Error::new(io::ErrorKind::Other, err)
diff --git a/src/spec.rs b/src/spec.rs
index 91966b6..4ab3656 100644
--- a/src/spec.rs
+++ b/src/spec.rs
@@ -117,6 +117,14 @@
             number_of_disks,
         })
     }
+
+    pub fn write<T: Write>(&self, writer: &mut T) -> ZipResult<()> {
+        writer.write_u32::<LittleEndian>(ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE)?;
+        writer.write_u32::<LittleEndian>(self.disk_with_central_directory)?;
+        writer.write_u64::<LittleEndian>(self.end_of_central_directory_offset)?;
+        writer.write_u32::<LittleEndian>(self.number_of_disks)?;
+        Ok(())
+    }
 }
 
 pub struct Zip64CentralDirectoryEnd {
@@ -179,4 +187,18 @@
             "Could not find ZIP64 central directory end",
         ))
     }
+
+    pub fn write<T: Write>(&self, writer: &mut T) -> ZipResult<()> {
+        writer.write_u32::<LittleEndian>(ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE)?;
+        writer.write_u64::<LittleEndian>(44)?; // record size
+        writer.write_u16::<LittleEndian>(self.version_made_by)?;
+        writer.write_u16::<LittleEndian>(self.version_needed_to_extract)?;
+        writer.write_u32::<LittleEndian>(self.disk_number)?;
+        writer.write_u32::<LittleEndian>(self.disk_with_central_directory)?;
+        writer.write_u64::<LittleEndian>(self.number_of_files_on_this_disk)?;
+        writer.write_u64::<LittleEndian>(self.number_of_files)?;
+        writer.write_u64::<LittleEndian>(self.central_directory_size)?;
+        writer.write_u64::<LittleEndian>(self.central_directory_offset)?;
+        Ok(())
+    }
 }
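
For reference (not part of the patch): the fixed `44` written as the record size above counts the
ZIP64 end-of-central-directory fields that follow the signature and the size field itself.

```rust
// Sketch only: the ZIP64 EOCD "record size" excludes the 4-byte signature and the
// 8-byte size field, and counts everything written after them:
//   version_made_by (2) + version_needed_to_extract (2)
// + disk_number (4) + disk_with_central_directory (4)
// + number_of_files_on_this_disk (8) + number_of_files (8)
// + central_directory_size (8) + central_directory_offset (8)
const ZIP64_EOCD_RECORD_SIZE: u64 = 2 + 2 + 4 + 4 + 8 + 8 + 8 + 8;

fn main() {
    assert_eq!(ZIP64_EOCD_RECORD_SIZE, 44);
}
```
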
diff --git a/src/types.rs b/src/types.rs
index 154e3c2..026aa15 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -216,6 +216,8 @@
     pub version_made_by: u8,
     /// True if the file is encrypted.
     pub encrypted: bool,
+    /// True if the file uses a data-descriptor section
+    pub using_data_descriptor: bool,
     /// Compression method used to store the file
     pub compression_method: crate::compression::CompressionMethod,
     /// Last modified time. This will only have a 2 second precision.
@@ -230,6 +232,8 @@
     pub file_name: String,
     /// Raw file name. To be used when file_name was incorrectly decoded.
     pub file_name_raw: Vec<u8>,
+    /// Extra field usually used for storage expansion
+    pub extra_field: Vec<u8>,
     /// File comment
     pub file_comment: String,
     /// Specifies where the local header of the file starts
@@ -242,6 +246,8 @@
     pub data_start: u64,
     /// External file attributes
     pub external_attributes: u32,
+    /// Reserve local ZIP64 extra field
+    pub large_file: bool,
 }
 
 impl ZipFileData {
@@ -275,10 +281,18 @@
             })
     }
 
+    pub fn zip64_extension(&self) -> bool {
+        self.uncompressed_size > 0xFFFFFFFF
+            || self.compressed_size > 0xFFFFFFFF
+            || self.header_start > 0xFFFFFFFF
+    }
+
     pub fn version_needed(&self) -> u16 {
-        match self.compression_method {
+        // higher versions matched first
+        match (self.zip64_extension(), self.compression_method) {
             #[cfg(feature = "bzip2")]
-            crate::compression::CompressionMethod::Bzip2 => 46,
+            (_, crate::compression::CompressionMethod::Bzip2) => 46,
+            (true, _) => 45,
             _ => 20,
         }
     }
@@ -303,6 +317,7 @@
             system: System::Dos,
             version_made_by: 0,
             encrypted: false,
+            using_data_descriptor: false,
             compression_method: crate::compression::CompressionMethod::Stored,
             last_modified_time: DateTime::default(),
             crc32: 0,
@@ -310,11 +325,13 @@
             uncompressed_size: 0,
             file_name: file_name.clone(),
             file_name_raw: file_name.into_bytes(),
+            extra_field: Vec::new(),
             file_comment: String::new(),
             header_start: 0,
             data_start: 0,
             central_header_start: 0,
             external_attributes: 0,
+            large_file: false,
         };
         assert_eq!(
             data.file_name_sanitized(),
diff --git a/src/write.rs b/src/write.rs
index bc68817..05c3666 100644
--- a/src/write.rs
+++ b/src/write.rs
@@ -1,11 +1,11 @@
 //! Types for creating ZIP archives
 
 use crate::compression::CompressionMethod;
-use crate::read::ZipFile;
+use crate::read::{central_header_to_zip_file, ZipArchive, ZipFile};
 use crate::result::{ZipError, ZipResult};
 use crate::spec;
 use crate::types::{DateTime, System, ZipFileData, DEFAULT_VERSION};
-use byteorder::{LittleEndian, WriteBytesExt};
+use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
 use crc32fast::Hasher;
 use std::default::Default;
 use std::io;
@@ -68,8 +68,10 @@
     files: Vec<ZipFileData>,
     stats: ZipWriterStats,
     writing_to_file: bool,
-    comment: String,
+    writing_to_extra_field: bool,
+    writing_to_central_extra_field_only: bool,
     writing_raw: bool,
+    comment: Vec<u8>,
 }
 
 #[derive(Default)]
@@ -91,6 +93,7 @@
     compression_method: CompressionMethod,
     last_modified_time: DateTime,
     permissions: Option<u32>,
+    large_file: bool,
 }
 
 impl FileOptions {
@@ -114,6 +117,7 @@
             #[cfg(not(feature = "time"))]
             last_modified_time: DateTime::default(),
             permissions: None,
+            large_file: false,
         }
     }
 
@@ -121,7 +125,6 @@
     ///
     /// The default is `CompressionMethod::Deflated`. If the deflate compression feature is
     /// disabled, `CompressionMethod::Stored` becomes the default.
-    /// otherwise.
     pub fn compression_method(mut self, method: CompressionMethod) -> FileOptions {
         self.compression_method = method;
         self
@@ -145,6 +148,16 @@
         self.permissions = Some(mode & 0o777);
         self
     }
+
+    /// Set whether the new file's compressed and uncompressed size is less than 4 GiB.
+    ///
+    /// If set to `false` and the file exceeds the limit, an I/O error is thrown. If set to `true`,
+    /// readers will require ZIP64 support and if the file does not exceed the limit, 20 B are
+    /// wasted. The default is `false`.
+    pub fn large_file(mut self, large: bool) -> FileOptions {
+        self.large_file = large;
+        self
+    }
 }
 
 impl Default for FileOptions {
@@ -163,11 +176,24 @@
         }
         match self.inner.ref_mut() {
             Some(ref mut w) => {
-                let write_result = w.write(buf);
-                if let Ok(count) = write_result {
-                    self.stats.update(&buf[0..count]);
+                if self.writing_to_extra_field {
+                    self.files.last_mut().unwrap().extra_field.write(buf)
+                } else {
+                    let write_result = w.write(buf);
+                    if let Ok(count) = write_result {
+                        self.stats.update(&buf[0..count]);
+                        if self.stats.bytes_written > 0xFFFFFFFF
+                            && !self.files.last_mut().unwrap().large_file
+                        {
+                            let _inner = mem::replace(&mut self.inner, GenericZipWriter::Closed);
+                            return Err(io::Error::new(
+                                io::ErrorKind::Other,
+                                "Large file option has not been set",
+                            ));
+                        }
+                    }
+                    write_result
                 }
-                write_result
             }
             None => Err(io::Error::new(
                 io::ErrorKind::BrokenPipe,
@@ -194,6 +220,45 @@
     }
 }
 
+impl<A: Read + Write + io::Seek> ZipWriter<A> {
+    /// Initializes the archive from an existing ZIP archive, making it ready for append.
+    pub fn new_append(mut readwriter: A) -> ZipResult<ZipWriter<A>> {
+        let (footer, cde_start_pos) = spec::CentralDirectoryEnd::find_and_parse(&mut readwriter)?;
+
+        if footer.disk_number != footer.disk_with_central_directory {
+            return Err(ZipError::UnsupportedArchive(
+                "Support for multi-disk files is not implemented",
+            ));
+        }
+
+        let (archive_offset, directory_start, number_of_files) =
+            ZipArchive::get_directory_counts(&mut readwriter, &footer, cde_start_pos)?;
+
+        if let Err(_) = readwriter.seek(io::SeekFrom::Start(directory_start)) {
+            return Err(ZipError::InvalidArchive(
+                "Could not seek to start of central directory",
+            ));
+        }
+
+        let files = (0..number_of_files)
+            .map(|_| central_header_to_zip_file(&mut readwriter, archive_offset))
+            .collect::<Result<Vec<_>, _>>()?;
+
+        let _ = readwriter.seek(io::SeekFrom::Start(directory_start)); // seek directory_start to overwrite it
+
+        Ok(ZipWriter {
+            inner: GenericZipWriter::Storer(readwriter),
+            files,
+            stats: Default::default(),
+            writing_to_file: false,
+            writing_to_extra_field: false,
+            writing_to_central_extra_field_only: false,
+            comment: footer.zip_file_comment,
+            writing_raw: true, // avoid recomputing the last file's header
+        })
+    }
+}
+
 impl<W: Write + io::Seek> ZipWriter<W> {
     /// Initializes the archive.
     ///
@@ -204,8 +269,10 @@
             files: Vec::new(),
             stats: Default::default(),
             writing_to_file: false,
-            comment: String::new(),
+            writing_to_extra_field: false,
+            writing_to_central_extra_field_only: false,
             writing_raw: false,
+            comment: Vec::new(),
         }
     }
 
@@ -214,7 +281,15 @@
     where
         S: Into<String>,
     {
-        self.comment = comment.into();
+        self.set_raw_comment(comment.into().into())
+    }
+
+    /// Set ZIP archive comment.
+    ///
+    /// This sets the raw bytes of the comment. The comment
+    /// is typically expected to be encoded in UTF-8.
+    pub fn set_raw_comment(&mut self, comment: Vec<u8>) {
+        self.comment = comment;
     }
 
     /// Start a new file for with the requested options.
@@ -229,7 +304,6 @@
     {
         self.finish_file()?;
 
-        let is_raw = raw_values.is_some();
         let raw_values = raw_values.unwrap_or_else(|| ZipRawValues {
             crc32: 0,
             compressed_size: 0,
@@ -245,6 +319,7 @@
                 system: System::Unix,
                 version_made_by: DEFAULT_VERSION,
                 encrypted: false,
+                using_data_descriptor: false,
                 compression_method: options.compression_method,
                 last_modified_time: options.last_modified_time,
                 crc32: raw_values.crc32,
@@ -252,11 +327,13 @@
                 uncompressed_size: raw_values.uncompressed_size,
                 file_name: name.into(),
                 file_name_raw: Vec::new(), // Never used for saving
+                extra_field: Vec::new(),
                 file_comment: String::new(),
                 header_start,
                 data_start: 0,
                 central_header_start: 0,
                 external_attributes: permissions << 16,
+                large_file: options.large_file,
             };
             write_local_file_header(writer, &file)?;
 
@@ -270,17 +347,14 @@
             self.files.push(file);
         }
 
-        self.writing_raw = is_raw;
-        self.inner.switch_to(if is_raw {
-            CompressionMethod::Stored
-        } else {
-            options.compression_method
-        })?;
-
         Ok(())
     }
 
     fn finish_file(&mut self) -> ZipResult<()> {
+        if self.writing_to_extra_field {
+            // Implicitly calling [`ZipWriter::end_extra_data`] for empty files.
+            self.end_extra_data()?;
+        }
         self.inner.switch_to(CompressionMethod::Stored)?;
         let writer = self.inner.get_plain();
 
@@ -316,13 +390,14 @@
         }
         *options.permissions.as_mut().unwrap() |= 0o100000;
         self.start_entry(name, options, None)?;
+        self.inner.switch_to(options.compression_method)?;
         self.writing_to_file = true;
         Ok(())
     }
 
     /// Starts a file, taking a Path as argument.
     ///
-    /// This function ensures that the '/' path seperator is used. It also ignores all non 'Normal'
+    /// This function ensures that the '/' path separator is used. It also ignores all non 'Normal'
     /// Components, such as a starting '/' or '..' and '.'.
     #[deprecated(
         since = "0.5.7",
@@ -336,6 +411,168 @@
         self.start_file(path_to_string(path), options)
     }
 
+    /// Create an aligned file in the archive and start writing its contents.
+    ///
+    /// Returns the number of padding bytes required to align the file.
+    ///
+    /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`]
+    pub fn start_file_aligned<S>(
+        &mut self,
+        name: S,
+        options: FileOptions,
+        align: u16,
+    ) -> Result<u64, ZipError>
+    where
+        S: Into<String>,
+    {
+        let data_start = self.start_file_with_extra_data(name, options)?;
+        let align = align as u64;
+        if align > 1 && data_start % align != 0 {
+            let pad_length = (align - (data_start + 4) % align) % align;
+            let pad = vec![0; pad_length as usize];
+            self.write_all(b"za").map_err(ZipError::from)?; // 0x617a
+            self.write_u16::<LittleEndian>(pad.len() as u16)
+                .map_err(ZipError::from)?;
+            self.write_all(&pad).map_err(ZipError::from)?;
+            assert_eq!(self.end_local_start_central_extra_data()? % align, 0);
+        }
+        let extra_data_end = self.end_extra_data()?;
+        Ok(extra_data_end - data_start)
+    }
+
+    /// Create a file in the archive and start writing its extra data first.
+    ///
+    /// Finish writing extra data and start writing file data with [`ZipWriter::end_extra_data`].
+    /// Optionally, distinguish local from central extra data with
+    /// [`ZipWriter::end_local_start_central_extra_data`].
+    ///
+    /// Returns the preliminary starting offset of the file data, without any extra data, allowing
+    /// the file data to be aligned by calculating a pad length to be prepended as part of the extra data.
+    ///
+    /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`]
+    ///
+    /// ```
+    /// use byteorder::{LittleEndian, WriteBytesExt};
+    /// use zip::{ZipArchive, ZipWriter, result::ZipResult};
+    /// use zip::{write::FileOptions, CompressionMethod};
+    /// use std::io::{Write, Cursor};
+    ///
+    /// # fn main() -> ZipResult<()> {
+    /// let mut archive = Cursor::new(Vec::new());
+    ///
+    /// {
+    ///     let mut zip = ZipWriter::new(&mut archive);
+    ///     let options = FileOptions::default()
+    ///         .compression_method(CompressionMethod::Stored);
+    ///
+    ///     zip.start_file_with_extra_data("identical_extra_data.txt", options)?;
+    ///     let extra_data = b"local and central extra data";
+    ///     zip.write_u16::<LittleEndian>(0xbeef)?;
+    ///     zip.write_u16::<LittleEndian>(extra_data.len() as u16)?;
+    ///     zip.write_all(extra_data)?;
+    ///     zip.end_extra_data()?;
+    ///     zip.write_all(b"file data")?;
+    ///
+    ///     let data_start = zip.start_file_with_extra_data("different_extra_data.txt", options)?;
+    ///     let extra_data = b"local extra data";
+    ///     zip.write_u16::<LittleEndian>(0xbeef)?;
+    ///     zip.write_u16::<LittleEndian>(extra_data.len() as u16)?;
+    ///     zip.write_all(extra_data)?;
+    ///     let data_start = data_start as usize + 4 + extra_data.len() + 4;
+    ///     let align = 64;
+    ///     let pad_length = (align - data_start % align) % align;
+    ///     assert_eq!(pad_length, 19);
+    ///     zip.write_u16::<LittleEndian>(0xdead)?;
+    ///     zip.write_u16::<LittleEndian>(pad_length as u16)?;
+    ///     zip.write_all(&vec![0; pad_length])?;
+    ///     let data_start = zip.end_local_start_central_extra_data()?;
+    ///     assert_eq!(data_start as usize % align, 0);
+    ///     let extra_data = b"central extra data";
+    ///     zip.write_u16::<LittleEndian>(0xbeef)?;
+    ///     zip.write_u16::<LittleEndian>(extra_data.len() as u16)?;
+    ///     zip.write_all(extra_data)?;
+    ///     zip.end_extra_data()?;
+    ///     zip.write_all(b"file data")?;
+    ///
+    ///     zip.finish()?;
+    /// }
+    ///
+    /// let mut zip = ZipArchive::new(archive)?;
+    /// assert_eq!(&zip.by_index(0)?.extra_data()[4..], b"local and central extra data");
+    /// assert_eq!(&zip.by_index(1)?.extra_data()[4..], b"central extra data");
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn start_file_with_extra_data<S>(
+        &mut self,
+        name: S,
+        mut options: FileOptions,
+    ) -> ZipResult<u64>
+    where
+        S: Into<String>,
+    {
+        if options.permissions.is_none() {
+            options.permissions = Some(0o644);
+        }
+        *options.permissions.as_mut().unwrap() |= 0o100000;
+        self.start_entry(name, options, None)?;
+        self.writing_to_file = true;
+        self.writing_to_extra_field = true;
+        Ok(self.files.last().unwrap().data_start)
+    }
+
+    /// End local and start central extra data. Requires [`ZipWriter::start_file_with_extra_data`].
+    ///
+    /// Returns the final starting offset of the file data.
+    pub fn end_local_start_central_extra_data(&mut self) -> ZipResult<u64> {
+        let data_start = self.end_extra_data()?;
+        self.files.last_mut().unwrap().extra_field.clear();
+        self.writing_to_extra_field = true;
+        self.writing_to_central_extra_field_only = true;
+        Ok(data_start)
+    }
+
+    /// End extra data and start file data. Requires [`ZipWriter::start_file_with_extra_data`].
+    ///
+    /// Returns the final starting offset of the file data.
+    pub fn end_extra_data(&mut self) -> ZipResult<u64> {
+        // Require `start_file_with_extra_data()`. Ensures `file` is some.
+        if !self.writing_to_extra_field {
+            return Err(ZipError::Io(io::Error::new(
+                io::ErrorKind::Other,
+                "Not writing to extra field",
+            )));
+        }
+        let file = self.files.last_mut().unwrap();
+
+        validate_extra_data(&file)?;
+
+        if !self.writing_to_central_extra_field_only {
+            let writer = self.inner.get_plain();
+
+            // Append extra data to local file header and keep it for central file header.
+            writer.write_all(&file.extra_field)?;
+
+            // Update final `data_start`.
+            let header_end = file.data_start + file.extra_field.len() as u64;
+            self.stats.start = header_end;
+            file.data_start = header_end;
+
+            // Update extra field length in local file header.
+            let extra_field_length =
+                if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16;
+            writer.seek(io::SeekFrom::Start(file.header_start + 28))?;
+            writer.write_u16::<LittleEndian>(extra_field_length)?;
+            writer.seek(io::SeekFrom::Start(header_end))?;
+
+            self.inner.switch_to(file.compression_method)?;
+        }
+
+        self.writing_to_extra_field = false;
+        self.writing_to_central_extra_field_only = false;
+        Ok(file.data_start)
+    }
+
     /// Add a new file using the already compressed data from a ZIP file being read and renames it, this
     /// allows faster copies of the `ZipFile` since there is no need to decompress and compress it again.
     /// Any `ZipFile` metadata is copied and not checked, for example the file CRC.
@@ -381,6 +618,7 @@
 
         self.start_entry(name, options, Some(raw_values))?;
         self.writing_to_file = true;
+        self.writing_raw = true;
 
         io::copy(file.get_raw_reader(), self)?;
 
@@ -478,14 +716,51 @@
             }
             let central_size = writer.seek(io::SeekFrom::Current(0))? - central_start;
 
+            if self.files.len() > 0xFFFF || central_size > 0xFFFFFFFF || central_start > 0xFFFFFFFF
+            {
+                let zip64_footer = spec::Zip64CentralDirectoryEnd {
+                    version_made_by: DEFAULT_VERSION as u16,
+                    version_needed_to_extract: DEFAULT_VERSION as u16,
+                    disk_number: 0,
+                    disk_with_central_directory: 0,
+                    number_of_files_on_this_disk: self.files.len() as u64,
+                    number_of_files: self.files.len() as u64,
+                    central_directory_size: central_size,
+                    central_directory_offset: central_start,
+                };
+
+                zip64_footer.write(writer)?;
+
+                let zip64_footer = spec::Zip64CentralDirectoryEndLocator {
+                    disk_with_central_directory: 0,
+                    end_of_central_directory_offset: central_start + central_size,
+                    number_of_disks: 1,
+                };
+
+                zip64_footer.write(writer)?;
+            }
+
+            let number_of_files = if self.files.len() > 0xFFFF {
+                0xFFFF
+            } else {
+                self.files.len() as u16
+            };
             let footer = spec::CentralDirectoryEnd {
                 disk_number: 0,
                 disk_with_central_directory: 0,
-                number_of_files_on_this_disk: self.files.len() as u16,
-                number_of_files: self.files.len() as u16,
-                central_directory_size: central_size as u32,
-                central_directory_offset: central_start as u32,
-                zip_file_comment: self.comment.as_bytes().to_vec(),
+                zip_file_comment: self.comment.clone(),
+                number_of_files_on_this_disk: number_of_files,
+                number_of_files,
+                central_directory_size: if central_size > 0xFFFFFFFF {
+                    0xFFFFFFFF
+                } else {
+                    central_size as u32
+                },
+                central_directory_offset: if central_start > 0xFFFFFFFF {
+                    0xFFFFFFFF
+                } else {
+                    central_start as u32
+                },
             };
 
             footer.write(writer)?;
@@ -553,7 +828,7 @@
                 )),
                 #[cfg(feature = "bzip2")]
                 CompressionMethod::Bzip2 => {
-                    GenericZipWriter::Bzip2(BzEncoder::new(bare, bzip2::Compression::Default))
+                    GenericZipWriter::Bzip2(BzEncoder::new(bare, bzip2::Compression::default()))
                 }
                 CompressionMethod::Unsupported(..) => {
                     return Err(ZipError::UnsupportedArchive("Unsupported compression"))
@@ -637,18 +912,28 @@
     // crc-32
     writer.write_u32::<LittleEndian>(file.crc32)?;
     // compressed size
-    writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
+    writer.write_u32::<LittleEndian>(if file.compressed_size > 0xFFFFFFFF {
+        0xFFFFFFFF
+    } else {
+        file.compressed_size as u32
+    })?;
     // uncompressed size
-    writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
+    writer.write_u32::<LittleEndian>(if file.uncompressed_size > 0xFFFFFFFF {
+        0xFFFFFFFF
+    } else {
+        file.uncompressed_size as u32
+    })?;
     // file name length
     writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
     // extra field length
-    let extra_field = build_extra_field(file)?;
-    writer.write_u16::<LittleEndian>(extra_field.len() as u16)?;
+    let extra_field_length = if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16;
+    writer.write_u16::<LittleEndian>(extra_field_length)?;
     // file name
     writer.write_all(file.file_name.as_bytes())?;
-    // extra field
-    writer.write_all(&extra_field)?;
+    // zip64 extra field
+    if file.large_file {
+        write_local_zip64_extra_field(writer, &file)?;
+    }
 
     Ok(())
 }
@@ -660,12 +945,37 @@
     const CRC32_OFFSET: u64 = 14;
     writer.seek(io::SeekFrom::Start(file.header_start + CRC32_OFFSET))?;
     writer.write_u32::<LittleEndian>(file.crc32)?;
-    writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
-    writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
+    writer.write_u32::<LittleEndian>(if file.compressed_size > 0xFFFFFFFF {
+        if file.large_file {
+            0xFFFFFFFF
+        } else {
+            // compressed size can be slightly larger than uncompressed size
+            return Err(ZipError::Io(io::Error::new(
+                io::ErrorKind::Other,
+                "Large file option has not been set",
+            )));
+        }
+    } else {
+        file.compressed_size as u32
+    })?;
+    writer.write_u32::<LittleEndian>(if file.uncompressed_size > 0xFFFFFFFF {
+        // uncompressed size is checked on write to catch it as soon as possible
+        0xFFFFFFFF
+    } else {
+        file.uncompressed_size as u32
+    })?;
+    if file.large_file {
+        update_local_zip64_extra_field(writer, file)?;
+    }
     Ok(())
 }
 
 fn write_central_directory_header<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
+    // buffer zip64 extra field to determine its variable length
+    let mut zip64_extra_field = [0; 28];
+    let zip64_extra_field_length =
+        write_central_zip64_extra_field(&mut zip64_extra_field.as_mut(), file)?;
+
     // central file header signature
     writer.write_u32::<LittleEndian>(spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE)?;
     // version made by
@@ -689,14 +999,21 @@
     // crc-32
     writer.write_u32::<LittleEndian>(file.crc32)?;
     // compressed size
-    writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
+    writer.write_u32::<LittleEndian>(if file.compressed_size > 0xFFFFFFFF {
+        0xFFFFFFFF
+    } else {
+        file.compressed_size as u32
+    })?;
     // uncompressed size
-    writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
+    writer.write_u32::<LittleEndian>(if file.uncompressed_size > 0xFFFFFFFF {
+        0xFFFFFFFF
+    } else {
+        file.uncompressed_size as u32
+    })?;
     // file name length
     writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
     // extra field length
-    let extra_field = build_extra_field(file)?;
-    writer.write_u16::<LittleEndian>(extra_field.len() as u16)?;
+    writer.write_u16::<LittleEndian>(zip64_extra_field_length + file.extra_field.len() as u16)?;
     // file comment length
     writer.write_u16::<LittleEndian>(0)?;
     // disk number start
@@ -706,21 +1023,139 @@
     // external file attributes
     writer.write_u32::<LittleEndian>(file.external_attributes)?;
     // relative offset of local header
-    writer.write_u32::<LittleEndian>(file.header_start as u32)?;
+    writer.write_u32::<LittleEndian>(if file.header_start > 0xFFFFFFFF {
+        0xFFFFFFFF
+    } else {
+        file.header_start as u32
+    })?;
     // file name
     writer.write_all(file.file_name.as_bytes())?;
+    // zip64 extra field
+    writer.write_all(&zip64_extra_field[..zip64_extra_field_length as usize])?;
     // extra field
-    writer.write_all(&extra_field)?;
+    writer.write_all(&file.extra_field)?;
     // file comment
     // <none>
 
     Ok(())
 }
 
-fn build_extra_field(_file: &ZipFileData) -> ZipResult<Vec<u8>> {
-    let writer = Vec::new();
-    // Future work
-    Ok(writer)
+fn validate_extra_data(file: &ZipFileData) -> ZipResult<()> {
+    let mut data = file.extra_field.as_slice();
+
+    if data.len() > 0xFFFF {
+        return Err(ZipError::Io(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "Extra data exceeds extra field",
+        )));
+    }
+
+    while data.len() > 0 {
+        let left = data.len();
+        if left < 4 {
+            return Err(ZipError::Io(io::Error::new(
+                io::ErrorKind::Other,
+                "Incomplete extra data header",
+            )));
+        }
+        let kind = data.read_u16::<LittleEndian>()?;
+        let size = data.read_u16::<LittleEndian>()? as usize;
+        let left = left - 4;
+
+        if kind == 0x0001 {
+            return Err(ZipError::Io(io::Error::new(
+                io::ErrorKind::Other,
+                "No custom ZIP64 extra data allowed",
+            )));
+        }
+
+        #[cfg(not(feature = "unreserved"))]
+        {
+            if kind <= 31 || EXTRA_FIELD_MAPPING.iter().any(|&mapped| mapped == kind) {
+                return Err(ZipError::Io(io::Error::new(
+                    io::ErrorKind::Other,
+                    format!(
+                        "Extra data header ID {:#06} requires crate feature \"unreserved\"",
+                        kind,
+                    ),
+                )));
+            }
+        }
+
+        if size > left {
+            return Err(ZipError::Io(io::Error::new(
+                io::ErrorKind::Other,
+                "Extra data size exceeds extra field",
+            )));
+        }
+
+        data = &data[size..];
+    }
+
+    Ok(())
+}
+
+fn write_local_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
+    // This entry in the Local header MUST include BOTH original
+    // and compressed file size fields.
+    writer.write_u16::<LittleEndian>(0x0001)?;
+    writer.write_u16::<LittleEndian>(16)?;
+    writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
+    writer.write_u64::<LittleEndian>(file.compressed_size)?;
+    // Excluded fields:
+    // u32: disk start number
+    Ok(())
+}
+
+fn update_local_zip64_extra_field<T: Write + io::Seek>(
+    writer: &mut T,
+    file: &ZipFileData,
+) -> ZipResult<()> {
+    let zip64_extra_field = file.header_start + 30 + file.file_name.as_bytes().len() as u64;
+    writer.seek(io::SeekFrom::Start(zip64_extra_field + 4))?;
+    writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
+    writer.write_u64::<LittleEndian>(file.compressed_size)?;
+    // Excluded fields:
+    // u32: disk start number
+    Ok(())
+}
+
+fn write_central_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<u16> {
+    // The order of the fields in the zip64 extended
+    // information record is fixed, but the fields MUST
+    // only appear if the corresponding Local or Central
+    // directory record field is set to 0xFFFF or 0xFFFFFFFF.
+    let mut size = 0;
+    let uncompressed_size = file.uncompressed_size > 0xFFFFFFFF;
+    let compressed_size = file.compressed_size > 0xFFFFFFFF;
+    let header_start = file.header_start > 0xFFFFFFFF;
+    if uncompressed_size {
+        size += 8;
+    }
+    if compressed_size {
+        size += 8;
+    }
+    if header_start {
+        size += 8;
+    }
+    if size > 0 {
+        writer.write_u16::<LittleEndian>(0x0001)?;
+        writer.write_u16::<LittleEndian>(size)?;
+        size += 4;
+
+        if uncompressed_size {
+            writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
+        }
+        if compressed_size {
+            writer.write_u64::<LittleEndian>(file.compressed_size)?;
+        }
+        if header_start {
+            writer.write_u64::<LittleEndian>(file.header_start)?;
+        }
+        // Excluded fields:
+        // u32: disk start number
+    }
+    Ok(size)
 }
 
 fn path_to_string(path: &std::path::Path) -> String {
@@ -791,6 +1226,7 @@
             compression_method: CompressionMethod::Stored,
             last_modified_time: DateTime::default(),
             permissions: Some(33188),
+            large_file: false,
         };
         writer.start_file("mimetype", options).unwrap();
         writer
@@ -819,3 +1255,12 @@
         assert_eq!(path_str, "windows/system32");
     }
 }
+
+#[cfg(not(feature = "unreserved"))]
+const EXTRA_FIELD_MAPPING: [u16; 49] = [
+    0x0001, 0x0007, 0x0008, 0x0009, 0x000a, 0x000c, 0x000d, 0x000e, 0x000f, 0x0014, 0x0015, 0x0016,
+    0x0017, 0x0018, 0x0019, 0x0020, 0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690, 0x07c8, 0x2605,
+    0x2705, 0x2805, 0x334d, 0x4341, 0x4453, 0x4704, 0x470f, 0x4b46, 0x4c41, 0x4d49, 0x4f4c, 0x5356,
+    0x5455, 0x554e, 0x5855, 0x6375, 0x6542, 0x7075, 0x756e, 0x7855, 0xa11e, 0xa220, 0xfd4a, 0x9901,
+    0x9902,
+];
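
(Not part of the patch.) A minimal sketch, under the assumption that `archive.zip` is an existing
valid ZIP file, of how two of the new writer APIs introduced above — `ZipWriter::new_append` and
`FileOptions::large_file` — might be used together:

```rust
use std::fs::OpenOptions;
use std::io::Write;
use zip::write::{FileOptions, ZipWriter};

fn main() -> zip::result::ZipResult<()> {
    // Hypothetical path; the file is assumed to already be a valid ZIP archive.
    let file = OpenOptions::new().read(true).write(true).open("archive.zip")?;

    // new_append (added in this change) parses the existing central directory
    // and positions the writer so further entries can be appended.
    let mut zip = ZipWriter::new_append(file)?;

    // large_file(true) (also new) reserves a local ZIP64 extra field so this
    // entry may grow past 4 GiB without the write erroring partway through.
    let options = FileOptions::default().large_file(true);
    zip.start_file("appended.txt", options)?;
    zip.write_all(b"appended contents")?;

    zip.finish()?;
    Ok(())
}
```
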
diff --git a/src/zipcrypto.rs b/src/zipcrypto.rs
index 32e8af8..3196ea3 100644
--- a/src/zipcrypto.rs
+++ b/src/zipcrypto.rs
@@ -57,6 +57,11 @@
     keys: ZipCryptoKeys,
 }
 
+pub enum ZipCryptoValidator {
+    PkzipCrc32(u32),
+    InfoZipMsdosTime(u16),
+}
+
 impl<R: std::io::Read> ZipCryptoReader<R> {
     /// Note: The password is `&[u8]` and not `&str` because the
     /// [zip specification](https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.3.TXT)
@@ -81,7 +86,7 @@
     /// Read the ZipCrypto header bytes and validate the password.
     pub fn validate(
         mut self,
-        crc32_plaintext: u32,
+        validator: ZipCryptoValidator,
     ) -> Result<Option<ZipCryptoReaderValid<R>>, std::io::Error> {
         // ZipCrypto prefixes a file with a 12 byte header
         let mut header_buf = [0u8; 12];
@@ -90,13 +95,30 @@
             *byte = self.keys.decrypt_byte(*byte);
         }
 
-        // PKZIP before 2.0 used 2 byte CRC check.
-        // PKZIP 2.0+ used 1 byte CRC check. It's more secure.
-        // We also use 1 byte CRC.
+        match validator {
+            ZipCryptoValidator::PkzipCrc32(crc32_plaintext) => {
+                // PKZIP before 2.0 used 2 byte CRC check.
+                // PKZIP 2.0+ used 1 byte CRC check. It's more secure.
+                // We also use 1 byte CRC.
 
-        if (crc32_plaintext >> 24) as u8 != header_buf[11] {
-            return Ok(None); // Wrong password
+                if (crc32_plaintext >> 24) as u8 != header_buf[11] {
+                    return Ok(None); // Wrong password
+                }
+            }
+            ZipCryptoValidator::InfoZipMsdosTime(last_mod_time) => {
+                // Info-ZIP modification to ZipCrypto format:
+                // If bit 3 of the general purpose bit flag is set
+                // (indicates that the file uses a data-descriptor section),
+                // it uses high byte of 16-bit File Time.
+                // Info-ZIP code probably writes 2 bytes of File Time.
+                // We check only 1 byte.
+
+                if (last_mod_time >> 8) as u8 != header_buf[11] {
+                    return Ok(None); // Wrong password
+                }
+            }
         }
+
         Ok(Some(ZipCryptoReaderValid { reader: self }))
     }
 }
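
(Not part of the patch.) A self-contained sketch of the validator selection this change introduces;
`Validator` below is a stand-in for the crate-internal `ZipCryptoValidator`, and the byte checks
mirror `validate` above:

```rust
// Stand-in for the crate-internal ZipCryptoValidator, for illustration only.
enum Validator {
    PkzipCrc32(u32),
    InfoZipMsdosTime(u16),
}

fn choose_validator(using_data_descriptor: bool, crc32: u32, msdos_time: u16) -> Validator {
    if using_data_descriptor {
        // With a data descriptor the CRC is not known when the header is written,
        // so the Info-ZIP variant checks the high byte of the MS-DOS file time.
        Validator::InfoZipMsdosTime(msdos_time)
    } else {
        // Classic PKZIP check against the high byte of the plaintext CRC-32.
        Validator::PkzipCrc32(crc32)
    }
}

fn password_byte_matches(validator: &Validator, decrypted_header_byte_11: u8) -> bool {
    match validator {
        Validator::PkzipCrc32(crc) => (crc >> 24) as u8 == decrypted_header_byte_11,
        Validator::InfoZipMsdosTime(time) => (time >> 8) as u8 == decrypted_header_byte_11,
    }
}

fn main() {
    let v = choose_validator(true, 0xDEAD_BEEF, 0x6B2F);
    assert!(password_byte_matches(&v, 0x6B));
}
```
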
diff --git a/tests/end_to_end.rs b/tests/end_to_end.rs
index b826f54..baebd28 100644
--- a/tests/end_to_end.rs
+++ b/tests/end_to_end.rs
@@ -1,3 +1,4 @@
+use byteorder::{LittleEndian, WriteBytesExt};
 use std::collections::HashSet;
 use std::io::prelude::*;
 use std::io::{Cursor, Seek};
@@ -46,6 +47,25 @@
     check_zip_file_contents(&mut tgt_archive, COPY_ENTRY_NAME);
 }
 
+// This test asserts that after appending to a `ZipWriter`, then reading its contents back out,
+// both the prior data and the appended data will be exactly the same as their originals.
+#[test]
+fn append() {
+    let mut file = &mut Cursor::new(Vec::new());
+    write_to_zip(file).expect("file written");
+
+    {
+        let mut zip = zip::ZipWriter::new_append(&mut file).unwrap();
+        zip.start_file(COPY_ENTRY_NAME, Default::default()).unwrap();
+        zip.write_all(LOREM_IPSUM).unwrap();
+        zip.finish().unwrap();
+    }
+
+    let mut zip = zip::ZipArchive::new(&mut file).unwrap();
+    check_zip_file_contents(&mut zip, ENTRY_NAME);
+    check_zip_file_contents(&mut zip, COPY_ENTRY_NAME);
+}
+
 fn write_to_zip(file: &mut Cursor<Vec<u8>>) -> zip::result::ZipResult<()> {
     let mut zip = zip::ZipWriter::new(file);
 
@@ -57,6 +77,13 @@
     zip.start_file("test/☃.txt", options)?;
     zip.write_all(b"Hello, World!\n")?;
 
+    zip.start_file_with_extra_data("test_with_extra_data/🐢.txt", Default::default())?;
+    zip.write_u16::<LittleEndian>(0xbeef)?;
+    zip.write_u16::<LittleEndian>(EXTRA_DATA.len() as u16)?;
+    zip.write_all(EXTRA_DATA)?;
+    zip.end_extra_data()?;
+    zip.write_all(b"Hello, World! Again.\n")?;
+
     zip.start_file(ENTRY_NAME, Default::default())?;
     zip.write_all(LOREM_IPSUM)?;
 
@@ -65,13 +92,27 @@
 }
 
 fn read_zip<R: Read + Seek>(zip_file: R) -> zip::result::ZipResult<zip::ZipArchive<R>> {
-    let archive = zip::ZipArchive::new(zip_file).unwrap();
+    let mut archive = zip::ZipArchive::new(zip_file).unwrap();
 
-    let expected_file_names = ["test/", "test/☃.txt", ENTRY_NAME];
+    let expected_file_names = [
+        "test/",
+        "test/☃.txt",
+        "test_with_extra_data/🐢.txt",
+        ENTRY_NAME,
+    ];
     let expected_file_names = HashSet::from_iter(expected_file_names.iter().map(|&v| v));
     let file_names = archive.file_names().collect::<HashSet<_>>();
     assert_eq!(file_names, expected_file_names);
 
+    {
+        let file_with_extra_data = archive.by_name("test_with_extra_data/🐢.txt")?;
+        let mut extra_data = Vec::new();
+        extra_data.write_u16::<LittleEndian>(0xbeef)?;
+        extra_data.write_u16::<LittleEndian>(EXTRA_DATA.len() as u16)?;
+        extra_data.write_all(EXTRA_DATA)?;
+        assert_eq!(file_with_extra_data.extra_data(), extra_data.as_slice());
+    }
+
     Ok(archive)
 }
 
@@ -103,6 +144,8 @@
 inceptos himenaeos. Maecenas feugiat velit in ex ultrices scelerisque id id neque.
 ";
 
+const EXTRA_DATA: &'static [u8] = b"Extra Data";
+
 const ENTRY_NAME: &str = "test/lorem_ipsum.txt";
 
 const COPY_ENTRY_NAME: &str = "test/lorem_ipsum_renamed.txt";
diff --git a/tests/zip_crypto.rs b/tests/zip_crypto.rs
index cae6b1f..6c4d6b8 100644
--- a/tests/zip_crypto.rs
+++ b/tests/zip_crypto.rs
@@ -47,9 +47,9 @@
         // No password
         let file = archive.by_index(0);
         match file {
-            Err(zip::result::ZipError::UnsupportedArchive("Password required to decrypt file")) => {
-                ()
-            }
+            Err(zip::result::ZipError::UnsupportedArchive(
+                zip::result::ZipError::PASSWORD_REQUIRED,
+            )) => (),
             Err(_) => panic!(
                 "Expected PasswordRequired error when opening encrypted file without password"
             ),