Merge remote-tracking branch 'origin/upstream' am: f545393dab am: 54648031d1

Original change: undetermined

Change-Id: I439f642b3db2f0655bab230338558691d1af84bc
Signed-off-by: Automerger Merge Worker <[email protected]>
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..346bbcc
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,44 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file after the first "rust_*" or "genrule" module
+// because the changes will be overridden on upgrade.
+// Content before the first "rust_*" or "genrule" module is preserved.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_libh2_license"],
+}
+
+license {
+    name: "external_rust_crates_libh2_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libh2",
+    host_supported: true,
+    crate_name: "h2",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.4.4",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    rustlibs: [
+        "libbytes",
+        "libfnv",
+        "libfutures_core",
+        "libfutures_sink",
+        "libfutures_util",
+        "libhttp",
+        "libindexmap",
+        "libslab",
+        "libtokio",
+        "libtokio_util",
+        "libtracing",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..3b9663d
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,340 @@
+# 0.4.4 (April 3, 2024)
+
+* Limit number of CONTINUATION frames for misbehaving connections.
+
+# 0.4.3 (March 15, 2024)
+
+* Fix flow control limits to not apply until receiving SETTINGS ack.
+* Fix not returning an error if IO ended without `close_notify`.
+* Improve performance of decoding many headers.
+
+# 0.4.2 (January 17th, 2024)
+
+* Limit error resets for misbehaving connections.
+* Fix selecting MAX_CONCURRENT_STREAMS value if no value is advertised initially.
+
+# 0.4.1 (January 8, 2024)
+
+* Fix assigning connection capacity which could starve streams in some instances.
+
+# 0.4.0 (November 15, 2023)
+
+* Update to `http` 1.0.
+* Remove deprecated `Server::poll_close()`.
+
+# 0.3.22 (November 15, 2023)
+
+* Add `header_table_size(usize)` option to client and server builders.
+* Improve throughput when vectored IO is not available.
+* Update indexmap to 2.
+
+# 0.3.21 (August 21, 2023)
+
+* Fix opening of new streams over peer's max concurrent limit.
+* Fix `RecvStream` to return data even if it has received a `CANCEL` stream error.
+* Update MSRV to 1.63.
+
+# 0.3.20 (June 26, 2023)
+
+* Fix panic if a server received a request with a `:status` pseudo header in the 1xx range.
+* Fix panic if a reset stream had more pending push promises than allowed.
+* Fix potential flow control overflow by subtraction, instead returning a connection error.
+
+# 0.3.19 (May 12, 2023)
+
+* Fix counting reset streams when triggered by a GOAWAY.
+* Send `too_many_resets` in opaque debug data of GOAWAY when too many resets received.
+
+# 0.3.18 (April 17, 2023)
+
+* Fix panic because of opposite check in `is_remote_local()`.
+
+# 0.3.17 (April 13, 2023)
+
+* Add `Error::is_library()` method to check if the error originated inside `h2`.
+* Add `max_pending_accept_reset_streams(usize)` option to client and server
+  builders.
+* Fix theoretical memory growth when receiving too many HEADERS and then
+  RST_STREAM frames faster than an application can accept them off the queue.
+  (CVE-2023-26964)
+
+# 0.3.16 (February 27, 2023)
+
+* Set the `Protocol` extension on requests when Extended CONNECT requests are received.
+* Remove the `B: Unpin + 'static` bound requirement on bufs.
+* Fix releasing of frames when stream is finished, reducing memory usage.
+* Fix panic when trying to send data and connection window is available, but stream window is not.
+* Fix spurious wakeups when stream capacity is not available.
+
+# 0.3.15 (October 21, 2022)
+
+* Remove `B: Buf` bound on `SendStream`'s parameter.
+* Add an accessor for the `u32` value of a `StreamId`.
+
+# 0.3.14 (August 16, 2022)
+
+* Add `Error::is_reset` function.
+* Bump MSRV to Rust 1.56.
+* Return `RST_STREAM(NO_ERROR)` when the server responds early.
+
+# 0.3.13 (March 31, 2022)
+
+* Update private internal `tokio-util` dependency.
+
+# 0.3.12 (March 9, 2022)
+
+* Avoid time operations that can panic (#599)
+* Bump MSRV to Rust 1.49 (#606)
+* Fix header decoding error when a header name is contained at a continuation
+  header boundary (#589)
+* Remove I/O type names from handshake `tracing` spans (#608)
+
+# 0.3.11 (January 26, 2022)
+
+* Make `SendStream::poll_capacity` never return `Ok(Some(0))` (#596)
+* Fix panic when receiving already reset push promise (#597)
+
+# 0.3.10 (January 6, 2022)
+
+* Add `Error::is_go_away()` and `Error::is_remote()` methods.
+* Fix panic if receiving malformed PUSH_PROMISE with stream ID of 0.
+
+# 0.3.9 (December 9, 2021)
+
+* Fix hang related to new `max_send_buffer_size`.
+
+# 0.3.8 (December 8, 2021)
+
+* Add "extended CONNECT support". Adds `h2::ext::Protocol`, which is used for request and response extensions to connect new protocols over an HTTP/2 stream.
+* Add `max_send_buffer_size` options to client and server builders, and a default of ~400MB. This acts like a high-water mark for the `poll_capacity()` method.
+* Fix panic if receiving malformed HEADERS with stream ID of 0.
+
+# 0.3.7 (October 22, 2021)
+
+* Fix panic if server sends a malformed frame on a stream client was about to open.
+* Fix server to treat `:status` in a request as a stream error instead of connection error.
+
+# 0.3.6 (September 30, 2021)
+
+* Fix regression where `h2::Error`s created via `From<h2::Reason>` did not return their reason code from `Error::reason()`.
+
+# 0.3.5 (September 29, 2021)
+
+* Fix sending of very large headers. Previously when a single header was too big to fit in a single `HEADERS` frame, an error was returned. Now it is broken up and sent correctly.
+* Fix the buffered data field to use a larger integer size.
+* Refactor the error format to include what initiated the error (remote, local, or user), whether it was a stream- or connection-level error, and any received debug data.
+
+# 0.3.4 (August 20, 2021)
+
+* Fix panic when encoding header size update over a certain size.
+* Fix `SendRequest` to wake up connection when dropped.
+* Fix potential hang if `RecvStream` is placed in the request or response `extensions`.
+* Stop calling `Instant::now` if zero reset streams are configured.
+
+# 0.3.3 (April 29, 2021)
+
+* Fix client being able to make `CONNECT` requests without a `:path`.
+* Expose `RecvStream::poll_data`.
+* Fix some docs.
+
+# 0.3.2 (March 24, 2021)
+
+* Fix incorrect handling of received 1xx responses on the client when the request body is still streaming.
+
+# 0.3.1 (February 26, 2021)
+
+* Add `Connection::max_concurrent_recv_streams()` getter.
+* Add `Connection::max_concurrent_send_streams()` getter.
+* Fix client to ignore receipt of 1xx headers frames.
+* Fix incorrect calculation of pseudo header lengths when determining if a received header is too big.
+* Reduce monomorphized code size of internal code.
+
+# 0.3.0 (December 23, 2020)
+
+* Update to Tokio v1 and Bytes v1.
+* Disable `tracing`'s `log` feature. (It can still be enabled by a user in their own `Cargo.toml`.)
+
+# 0.2.7 (October 22, 2020)
+
+* Fix stream ref count when sending a push promise
+* Fix receiving empty DATA frames in response to a HEAD request
+* Fix handling of client disabling SERVER_PUSH
+
+# 0.2.6 (July 13, 2020)
+
+* Integrate `tracing` directly where `log` was used. (For 0.2.x, `log`s are still emitted by default.)
+
+# 0.2.5 (May 6, 2020)
+
+* Fix rare debug assert failure in store shutdown.
+
+# 0.2.4 (March 30, 2020)
+
+* Fix handling of a received `SETTINGS_HEADER_TABLE_SIZE` setting.
+
+# 0.2.3 (March 25, 2020)
+
+* Fix server being able to accept `CONNECT` requests without `:scheme` or `:path`.
+* Fix receiving a GOAWAY frame so it no longer updates the recv max ID; it should only update the max send ID.
+
+# 0.2.2 (March 3, 2020)
+
+* Reduce size of `FlowControl` and `RecvStream`.
+
+# 0.2.1 (December 6, 2019)
+
+* Relax `Unpin` bounds on the send `Buf` generic.
+
+# 0.2.0 (December 3, 2019)
+
+* Add `server::Connection::set_initial_window_size` and `client::Connection::set_initial_window_size` which can adjust the `INITIAL_WINDOW_SIZE` setting on an existing connection (#421).
+* Update to `http` v0.2.
+* Update to `tokio` v0.2.
+* Change `unstable-stream` feature to `stream`.
+* Change `ReserveCapacity` to `FlowControl` (#423).
+* Remove `From<io::Error>` for `Error`.
+
+# 0.2.0-alpha.3 (October 1, 2019)
+
+* Update to futures `0.3.0-alpha.19`.
+* Update to tokio `0.2.0-alpha.6`.
+
+# 0.2.0-alpha.2 (September 20, 2019)
+
+* Add server support for `PUSH_PROMISE`s (#327).
+* Update to tokio `0.2.0-alpha.5`.
+* Change `stream` feature to `unstable-stream`.
+
+# 0.2.0-alpha.1 (August 30, 2019)
+
+* Update from `futures` 0.1 to `std::future::Future`.
+* Update `AsyncRead`/`AsyncWrite` to `tokio-io` 0.2 alpha.
+* Change `Stream` implementations to be optional, default disabled. Specific async and poll functions are now inherent, and `Stream` can be re-enabled with the `stream` cargo feature.
+
+# 0.1.25 (June 28, 2019)
+
+* Fix to send a `RST_STREAM` instead of `GOAWAY` if receiving a frame on a previously closed stream.
+* Fix receiving trailers without an end-stream flag to be a stream error instead of connection error.
+
+# 0.1.24 (June 17, 2019)
+
+* Fix server wrongly rejecting requests that don't have an `:authority` header (#372).
+
+# 0.1.23 (June 4, 2019)
+
+* Fix leaking of received DATA frames if the `RecvStream` is never polled (#368).
+
+# 0.1.22 (June 3, 2019)
+
+* Fix rare panic when remote sends `RST_STREAM` or `GOAWAY` for a stream pending window capacity (#364).
+
+# 0.1.21 (May 30, 2019)
+
+* Fix write loop when a header didn't fit in write buffer.
+
+# 0.1.20 (May 16, 2019)
+
+* Fix lifetime conflict for older compilers.
+
+# 0.1.19 (May 15, 2019)
+
+* Fix rare crash if `CONTINUATION` frame resumed in the middle of headers with the same name.
+* Fix HPACK encoder using an old evicted index for repeated header names.
+
+# 0.1.18 (April 9, 2019)
+
+* Fix `server::Connection::abrupt_shutdown` to no longer return the same error the user sent (#352).
+
+# 0.1.17 (March 12, 2019)
+
+* Add user PING support (#346).
+* Fix notifying a `RecvStream` task if locally sending a reset.
+* Fix connections "hanging" when all handles are dropped but some streams had been reset.
+
+# 0.1.16 (January 24, 2019)
+
+* Log header values when malformed (#342).
+
+# 0.1.15 (January 12, 2019)
+
+* Fix race condition bug related to shutting down the client (#338).
+
+# 0.1.14 (December 5, 2018)
+
+* Fix closed streams to always return window capacity to the connection (#334).
+* Fix locking when `Debug` printing an `OpaqueStreamRef` (#333).
+* Fix inverted split for DATA frame padding (#330).
+* Reduce `Debug` noise for `Frame` (#329).
+
+# 0.1.13 (October 16, 2018)
+
+* Add client support for Push Promises (#314).
+* Expose `io::Error` from `h2::Error` (#311)
+* Misc bug fixes (#304, #309, #319, #313, #320).
+
+# 0.1.12 (August 8, 2018)
+
+* Fix initial send window size (#301).
+* Fix panic when calling `reserve_capacity` after connection has been closed (#302).
+* Fix handling of incoming `SETTINGS_INITIAL_WINDOW_SIZE`. (#299)
+
+# 0.1.11 (July 31, 2018)
+
+* Add `stream_id` accessors to public API types (#292).
+* Fix potential panic when dropping clients (#295).
+* Fix busy loop when shutting down server (#296).
+
+# 0.1.10 (June 15, 2018)
+
+* Fix potential panic in `SendRequest::poll_ready()` (#281).
+* Fix infinite loop on reset connection during prefix (#285).
+
+# 0.1.9 (May 31, 2018)
+
+* Add `poll_reset` to `SendResponse` and `SendStream` (#279).
+
+# 0.1.8 (May 23, 2018)
+
+* Fix client bug when max streams is reached. (#277)
+
+# 0.1.7 (May 14, 2018)
+
+* Misc bug fixes (#266, #273, #261, #275).
+
+# 0.1.6 (April 24, 2018)
+
+* Misc bug fixes related to stream management (#258, #260, #262).
+
+# 0.1.5 (April 6, 2018)
+
+* Fix the `last_stream_id` sent during graceful GOAWAY (#254).
+
+# 0.1.4 (April 5, 2018)
+
+* Add `initial_connection_window_size` to client and server `Builder`s (#249).
+* Add `graceful_shutdown` and `abrupt_shutdown` to `server::Connection`,
+  deprecating `close_connection` (#250).
+
+# 0.1.3 (March 28, 2018)
+
+* Allow configuring max streams before the peer's settings frame is
+  received (#242).
+* Fix HPACK decoding bug with regards to large literals (#244).
+* Fix state transition bug triggered by receiving a RST_STREAM frame (#247).
+
+# 0.1.2 (March 13, 2018)
+
+* Fix another bug relating to resetting connections and reaching
+  max concurrency (#238).
+
+# 0.1.1 (March 8, 2018)
+
+* When streams are dropped, close the connection (#222).
+* Notify send tasks on connection error (#231).
+* Fix bug relating to resetting connections and reaching max concurrency (#235).
+* Normalize HTTP request path to satisfy HTTP/2.0 specification (#228).
+* Update internal dependencies.
+
+# 0.1.0 (Jan 12, 2018)
+
+* Initial release
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..8af0abc
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,84 @@
+# Contributing to _h2_ #
+
+:balloon: Thanks for your help improving the project!
+
+## Getting Help ##
+
+If you have a question about the h2 library or have encountered problems using it, you may
+[file an issue][issue] or ask a question on the [Tokio Gitter][gitter].
+
+## Submitting a Pull Request ##
+
+Do you have an improvement?
+
+1. Submit an [issue][issue] describing your proposed change.
+2. We will try to respond to your issue promptly.
+3. Fork this repo, develop and test your code changes. See the project's [README](README.md) for further information about working in this repository.
+4. Submit a pull request against this repo's `master` branch.
+5. Your branch may be merged once all configured checks pass, including:
+    - Code review has been completed.
+    - The branch has passed tests in CI.
+
+## Committing ##
+
+When initially submitting a pull request, we prefer a single squashed commit. It
+is preferable to split up contributions into multiple pull requests if the
+changes are unrelated. All pull requests are squashed when merged, but
+squashing yourself gives you better control over the commit message.
+
+After the pull request is submitted, all changes should be done in separate
+commits. This makes reviewing the evolution of the pull request easier. We will
+squash all the changes into a single commit when we merge the pull request.
+
+### Commit messages ###
+
+Finalized commit messages should be in the following format:
+
+```
+Subject
+
+Problem
+
+Solution
+
+Validation
+```
+
+#### Subject ####
+
+- one line, <= 50 characters
+- describe what is done, not the result
+- use the active voice
+- capitalize the first word and proper nouns
+- do not end in a period (this is a title/subject)
+- reference the GitHub issue by number
+
+##### Examples #####
+
+```
+bad: server disconnects should cause dst client disconnects.
+good: Propagate disconnects from source to destination
+```
+
+```
+bad: support tls servers
+good: Introduce support for server-side TLS (#347)
+```
+
+#### Problem ####
+
+Explain the context and why you're making the change. What is the problem
+you're trying to solve? In some cases there is no specific problem, in which
+case this section describes the motivation for your change.
+
+#### Solution ####
+
+Describe the modifications you've made.
+
+#### Validation ####
+
+Describe the testing you've done to validate your change. Performance-related
+changes should include benchmark results from before and after the change.
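+
+Put together, a finalized message following this structure might look like the
+following (a purely illustrative, hypothetical example):
+
+```
+Propagate disconnects from source to destination (#123)
+
+Problem
+
+When the source side of a forwarded stream closes, the destination side is
+left open and idle streams accumulate over time.
+
+Solution
+
+Watch for closure of the source stream and reset the corresponding
+destination stream when it happens.
+
+Validation
+
+Added an integration test that closes the source stream and asserts the
+destination stream is reset. Existing tests continue to pass.
+```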
+
+[issue]: https://github.com/hyperium/h2/issues/new
+[gitter]: https://gitter.im/tokio-rs/tokio
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..25ee391
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,128 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.63"
+name = "h2"
+version = "0.4.4"
+authors = [
+    "Carl Lerche <[email protected]>",
+    "Sean McArthur <[email protected]>",
+]
+exclude = [
+    "fixtures/**",
+    "ci/**",
+]
+description = "An HTTP/2 client and server"
+documentation = "https://docs.rs/h2"
+readme = "README.md"
+keywords = [
+    "http",
+    "async",
+    "non-blocking",
+]
+categories = [
+    "asynchronous",
+    "web-programming",
+    "network-programming",
+]
+license = "MIT"
+repository = "https://github.com/hyperium/h2"
+
+[package.metadata.docs.rs]
+features = ["stream"]
+
+[dependencies.bytes]
+version = "1"
+
+[dependencies.fnv]
+version = "1.0.5"
+
+[dependencies.futures-core]
+version = "0.3"
+default-features = false
+
+[dependencies.futures-sink]
+version = "0.3"
+default-features = false
+
+[dependencies.futures-util]
+version = "0.3"
+default-features = false
+
+[dependencies.http]
+version = "1"
+
+[dependencies.indexmap]
+version = "2"
+features = ["std"]
+
+[dependencies.slab]
+version = "0.4.2"
+
+[dependencies.tokio]
+version = "1"
+features = ["io-util"]
+
+[dependencies.tokio-util]
+version = "0.7.1"
+features = [
+    "codec",
+    "io",
+]
+
+[dependencies.tracing]
+version = "0.1.35"
+features = ["std"]
+default-features = false
+
+[dev-dependencies.env_logger]
+version = "0.10"
+default-features = false
+
+[dev-dependencies.hex]
+version = "0.4.3"
+
+[dev-dependencies.quickcheck]
+version = "1.0.3"
+default-features = false
+
+[dev-dependencies.rand]
+version = "0.8.4"
+
+[dev-dependencies.serde]
+version = "1.0.0"
+
+[dev-dependencies.serde_json]
+version = "1.0.0"
+
+[dev-dependencies.tokio]
+version = "1"
+features = [
+    "rt-multi-thread",
+    "macros",
+    "sync",
+    "net",
+]
+
+[dev-dependencies.tokio-rustls]
+version = "0.24"
+
+[dev-dependencies.walkdir]
+version = "2.3.2"
+
+[dev-dependencies.webpki-roots]
+version = "0.25"
+
+[features]
+stream = []
+unstable = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..452d2e8
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,73 @@
+[package]
+name = "h2"
+# When releasing to crates.io:
+# - Update CHANGELOG.md.
+# - Create git tag
+version = "0.4.4"
+license = "MIT"
+authors = [
+  "Carl Lerche <[email protected]>",
+  "Sean McArthur <[email protected]>",
+]
+description = "An HTTP/2 client and server"
+documentation = "https://docs.rs/h2"
+repository = "https://github.com/hyperium/h2"
+readme = "README.md"
+keywords = ["http", "async", "non-blocking"]
+categories = ["asynchronous", "web-programming", "network-programming"]
+exclude = ["fixtures/**", "ci/**"]
+edition = "2021"
+rust-version = "1.63"
+
+[features]
+# Enables `futures::Stream` implementations for various types.
+stream = []
+
+# Enables **unstable** APIs. Any API exposed by this feature has no backwards
+# compatibility guarantees. In other words, you should not use this feature for
+# anything besides experimentation. Definitely **do not** publish a crate that
+# depends on this feature.
+unstable = []
+
+[workspace]
+members = [
+    "tests/h2-fuzz",
+    "tests/h2-tests",
+    "tests/h2-support",
+    "util/genfixture",
+    "util/genhuff",
+]
+
+[dependencies]
+futures-core = { version = "0.3", default-features = false }
+futures-sink = { version = "0.3", default-features = false }
+futures-util = { version = "0.3", default-features = false }
+tokio-util = { version = "0.7.1", features = ["codec", "io"] }
+tokio = { version = "1", features = ["io-util"] }
+bytes = "1"
+http = "1"
+tracing = { version = "0.1.35", default-features = false, features = ["std"] }
+fnv = "1.0.5"
+slab = "0.4.2"
+indexmap = { version = "2", features = ["std"] }
+
+[dev-dependencies]
+
+# Fuzzing
+quickcheck = { version = "1.0.3", default-features = false }
+rand = "0.8.4"
+
+# HPACK fixtures
+hex = "0.4.3"
+walkdir = "2.3.2"
+serde = "1.0.0"
+serde_json = "1.0.0"
+
+# Examples
+tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "net"] }
+env_logger = { version = "0.10", default-features = false }
+tokio-rustls = "0.24"
+webpki-roots = "0.25"
+
+[package.metadata.docs.rs]
+features = ["stream"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..11239dd
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2017 h2 authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..0179086
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,20 @@
+name: "h2"
+description: "An HTTP/2 client and server"
+third_party {
+  identifier {
+    type: "crates.io"
+    value: "h2"
+  }
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/h2/h2-0.4.4.crate"
+    primary_source: true
+  }
+  version: "0.4.4"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 6
+    day: 3
+  }
+}
diff --git a/MODULE_LICENSE_MIT b/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_MIT
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..48bea6e
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 688011
+include platform/prebuilts/rust:main:/OWNERS
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..f83357d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,71 @@
+# H2
+
+A Tokio aware, HTTP/2 client & server implementation for Rust.
+
+[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+[![Crates.io](https://img.shields.io/crates/v/h2.svg)](https://crates.io/crates/h2)
+[![Documentation](https://docs.rs/h2/badge.svg)][dox]
+
+More information about this crate can be found in the [crate documentation][dox].
+
+[dox]: https://docs.rs/h2
+
+## Features
+
+* Client and server HTTP/2 implementation.
+* Implements the full HTTP/2 specification.
+* Passes [h2spec](https://github.com/summerwind/h2spec).
+* Focus on performance and correctness.
+* Built on [Tokio](https://tokio.rs).
+
+## Non goals
+
+This crate is intended to only be an implementation of the HTTP/2
+specification. It does not handle:
+
+* Managing TCP connections
+* HTTP 1.0 upgrade
+* TLS
+* Any feature not described by the HTTP/2 specification.
+
+This crate is now used by [hyper](https://github.com/hyperium/hyper), which will provide all of these features.
+
+## Usage
+
+To use `h2`, first add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+h2 = "0.4"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate h2;
+
+use h2::server::Connection;
+
+fn main() {
+    // ...
+}
+```
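+
+As a rough sketch of how the pieces fit together, a minimal client (mirroring
+`examples/client.rs` in this repository, assuming a server such as
+`examples/server.rs` is listening on `127.0.0.1:5928`, and assuming `tokio`
+with the `rt-multi-thread`, `macros`, and `net` features plus the `http` crate
+are also listed as dependencies) might look like this:
+
+```rust
+use h2::client;
+use http::Request;
+use tokio::net::TcpStream;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Establish the TCP connection and perform the HTTP/2 handshake.
+    let tcp = TcpStream::connect("127.0.0.1:5928").await?;
+    let (mut client, connection) = client::handshake(tcp).await?;
+
+    // The `Connection` must be polled to make progress, so spawn it.
+    tokio::spawn(async move {
+        if let Err(e) = connection.await {
+            println!("connection error: {:?}", e);
+        }
+    });
+
+    // Send a simple GET request with no body (`end_of_stream` is `true`).
+    let request = Request::get("http://127.0.0.1:5928/").body(())?;
+    let (response, _send_stream) = client.send_request(request, true)?;
+
+    println!("received response: {:?}", response.await?);
+    Ok(())
+}
+```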
+
+## FAQ
+
+**How does h2 compare to [solicit] or [rust-http2]?**
+
+The h2 library has implemented more of the details of the HTTP/2 specification
+than any other Rust library. It also passes the [h2spec] set of tests. The h2
+library is rapidly approaching "production ready" quality.
+
+Besides the above, Solicit is built on blocking I/O and does not appear to be
+actively maintained.
+
+**Is this an embedded Java SQL database engine?**
+
+[No](https://www.h2database.com).
+
+[solicit]: https://github.com/mlalic/solicit
+[rust-http2]: https://github.com/stepancheg/rust-http2
+[h2spec]: https://github.com/summerwind/h2spec
diff --git a/cargo_embargo.json b/cargo_embargo.json
new file mode 100644
index 0000000..cb908d7
--- /dev/null
+++ b/cargo_embargo.json
@@ -0,0 +1,3 @@
+{
+  "run_cargo": false
+}
diff --git a/examples/akamai.rs b/examples/akamai.rs
new file mode 100644
index 0000000..8d87b77
--- /dev/null
+++ b/examples/akamai.rs
@@ -0,0 +1,81 @@
+use h2::client;
+use http::{Method, Request};
+use tokio::net::TcpStream;
+use tokio_rustls::TlsConnector;
+
+use tokio_rustls::rustls::{OwnedTrustAnchor, RootCertStore, ServerName};
+
+use std::error::Error;
+use std::net::ToSocketAddrs;
+
+const ALPN_H2: &str = "h2";
+
+#[tokio::main]
+pub async fn main() -> Result<(), Box<dyn Error>> {
+    let _ = env_logger::try_init();
+
+    let tls_client_config = std::sync::Arc::new({
+        let mut root_store = RootCertStore::empty();
+        root_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
+            OwnedTrustAnchor::from_subject_spki_name_constraints(
+                ta.subject,
+                ta.spki,
+                ta.name_constraints,
+            )
+        }));
+
+        let mut c = tokio_rustls::rustls::ClientConfig::builder()
+            .with_safe_defaults()
+            .with_root_certificates(root_store)
+            .with_no_client_auth();
+        c.alpn_protocols.push(ALPN_H2.as_bytes().to_owned());
+        c
+    });
+
+    // Sync DNS resolution.
+    let addr = "http2.akamai.com:443"
+        .to_socket_addrs()
+        .unwrap()
+        .next()
+        .unwrap();
+
+    println!("ADDR: {:?}", addr);
+
+    let tcp = TcpStream::connect(&addr).await?;
+    let dns_name = ServerName::try_from("http2.akamai.com").unwrap();
+    let connector = TlsConnector::from(tls_client_config);
+    let res = connector.connect(dns_name, tcp).await;
+    let tls = res.unwrap();
+    {
+        let (_, session) = tls.get_ref();
+        let negotiated_protocol = session.alpn_protocol();
+        assert_eq!(Some(ALPN_H2.as_bytes()), negotiated_protocol);
+    }
+
+    println!("Starting client handshake");
+    let (mut client, h2) = client::handshake(tls).await?;
+
+    println!("building request");
+    let request = Request::builder()
+        .method(Method::GET)
+        .uri("https://http2.akamai.com/")
+        .body(())
+        .unwrap();
+
+    println!("sending request");
+    let (response, other) = client.send_request(request, true).unwrap();
+
+    tokio::spawn(async move {
+        if let Err(e) = h2.await {
+            println!("GOT ERR={:?}", e);
+        }
+    });
+
+    println!("waiting on response : {:?}", other);
+    let (_, mut body) = response.await?.into_parts();
+    println!("processing body");
+    while let Some(chunk) = body.data().await {
+        println!("RX: {:?}", chunk?);
+    }
+    Ok(())
+}
diff --git a/examples/client.rs b/examples/client.rs
new file mode 100644
index 0000000..61e237a
--- /dev/null
+++ b/examples/client.rs
@@ -0,0 +1,52 @@
+use h2::client;
+use http::{HeaderMap, Request};
+
+use std::error::Error;
+
+use tokio::net::TcpStream;
+
+#[tokio::main]
+pub async fn main() -> Result<(), Box<dyn Error>> {
+    let _ = env_logger::try_init();
+
+    let tcp = TcpStream::connect("127.0.0.1:5928").await?;
+    let (mut client, h2) = client::handshake(tcp).await?;
+
+    println!("sending request");
+
+    let request = Request::builder()
+        .uri("https://http2.akamai.com/")
+        .body(())
+        .unwrap();
+
+    let mut trailers = HeaderMap::new();
+    trailers.insert("zomg", "hello".parse().unwrap());
+
+    let (response, mut stream) = client.send_request(request, false).unwrap();
+
+    // send trailers
+    stream.send_trailers(trailers).unwrap();
+
+    // Spawn a task to run the conn...
+    tokio::spawn(async move {
+        if let Err(e) = h2.await {
+            println!("GOT ERR={:?}", e);
+        }
+    });
+
+    let response = response.await?;
+    println!("GOT RESPONSE: {:?}", response);
+
+    // Get the body
+    let mut body = response.into_body();
+
+    while let Some(chunk) = body.data().await {
+        println!("GOT CHUNK = {:?}", chunk?);
+    }
+
+    if let Some(trailers) = body.trailers().await? {
+        println!("GOT TRAILERS: {:?}", trailers);
+    }
+
+    Ok(())
+}
diff --git a/examples/server.rs b/examples/server.rs
new file mode 100644
index 0000000..6d6490d
--- /dev/null
+++ b/examples/server.rs
@@ -0,0 +1,65 @@
+use std::error::Error;
+
+use bytes::Bytes;
+use h2::server::{self, SendResponse};
+use h2::RecvStream;
+use http::Request;
+use tokio::net::{TcpListener, TcpStream};
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
+    let _ = env_logger::try_init();
+
+    let listener = TcpListener::bind("127.0.0.1:5928").await?;
+
+    println!("listening on {:?}", listener.local_addr());
+
+    loop {
+        if let Ok((socket, _peer_addr)) = listener.accept().await {
+            tokio::spawn(async move {
+                if let Err(e) = serve(socket).await {
+                    println!("  -> err={:?}", e);
+                }
+            });
+        }
+    }
+}
+
+async fn serve(socket: TcpStream) -> Result<(), Box<dyn Error + Send + Sync>> {
+    let mut connection = server::handshake(socket).await?;
+    println!("H2 connection bound");
+
+    while let Some(result) = connection.accept().await {
+        let (request, respond) = result?;
+        tokio::spawn(async move {
+            if let Err(e) = handle_request(request, respond).await {
+                println!("error while handling request: {}", e);
+            }
+        });
+    }
+
+    println!("~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~");
+    Ok(())
+}
+
+async fn handle_request(
+    mut request: Request<RecvStream>,
+    mut respond: SendResponse<Bytes>,
+) -> Result<(), Box<dyn Error + Send + Sync>> {
+    println!("GOT request: {:?}", request);
+
+    let body = request.body_mut();
+    while let Some(data) = body.data().await {
+        let data = data?;
+        println!("<<<< recv {:?}", data);
+        let _ = body.flow_control().release_capacity(data.len());
+    }
+
+    let response = http::Response::new(());
+    let mut send = respond.send_response(response, false)?;
+    println!(">>>> send");
+    send.send_data(Bytes::from_static(b"hello "), false)?;
+    send.send_data(Bytes::from_static(b"world\n"), true)?;
+
+    Ok(())
+}
diff --git a/src/client.rs b/src/client.rs
new file mode 100644
index 0000000..25b151f
--- /dev/null
+++ b/src/client.rs
@@ -0,0 +1,1673 @@
+//! Client implementation of the HTTP/2 protocol.
+//!
+//! # Getting started
+//!
+//! Running an HTTP/2 client requires the caller to establish the underlying
+//! connection as well as get the connection to a state that is ready to begin
+//! the HTTP/2 handshake. See [here](../index.html#handshake) for more
+//! details.
+//!
+//! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote
+//! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades.
+//!
+//! Once a connection is obtained, it is passed to [`handshake`], which will
+//! begin the [HTTP/2 handshake]. This returns a future that completes once
+//! the handshake process is performed and HTTP/2 streams may be initialized.
+//!
+//! [`handshake`] uses default configuration values. There are a number of
+//! settings that can be changed by using [`Builder`] instead.
+//!
+//! Once the handshake future completes, the caller is provided with a
+//! [`Connection`] instance and a [`SendRequest`] instance. The [`Connection`]
+//! instance is used to drive the connection (see [Managing the connection]).
+//! The [`SendRequest`] instance is used to initialize new streams (see [Making
+//! requests]).
+//!
+//! # Making requests
+//!
+//! Requests are made using the [`SendRequest`] handle provided by the handshake
+//! future. Once a request is submitted, an HTTP/2 stream is initialized and
+//! the request is sent to the server.
+//!
+//! A request body and request trailers are sent using [`SendRequest`] and the
+//! server's response is returned once the [`ResponseFuture`] future completes.
+//! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by
+//! [`SendRequest::send_request`] and are tied to the HTTP/2 stream
+//! initialized by the sent request.
+//!
+//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2
+//! stream can be created, i.e. as long as the current number of active streams
+//! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the
+//! caller will be notified once an existing stream closes, freeing capacity for
+//! the caller.  The caller should use [`SendRequest::poll_ready`] to check for
+//! capacity before sending a request to the server.
+//!
+//! [`SendRequest`] enforces the [`MAX_CONCURRENT_STREAMS`] setting. The user
+//! must not send a request if `poll_ready` does not return `Ready`. Attempting
+//! to do so will result in an [`Error`] being returned.
+//!
+//! # Managing the connection
+//!
+//! The [`Connection`] instance is used to manage connection state. The caller
+//! is required to call [`Connection::poll`] in order to advance state.
+//! [`SendRequest::send_request`] and other functions have no effect unless
+//! [`Connection::poll`] is called.
+//!
+//! The [`Connection`] instance should only be dropped once [`Connection::poll`]
+//! returns `Ready`. At this point, the underlying socket has been closed and no
+//! further work needs to be done.
+//!
+//! The easiest way to ensure that the [`Connection`] instance gets polled is to
+//! submit the [`Connection`] instance to an [executor]. The executor will then
+//! manage polling the connection until the connection is complete.
+//! Alternatively, the caller can call `poll` manually.
+//!
+//! # Example
+//!
+//! ```rust, no_run
+//!
+//! use h2::client;
+//!
+//! use http::{Request, Method};
+//! use std::error::Error;
+//! use tokio::net::TcpStream;
+//!
+//! #[tokio::main]
+//! pub async fn main() -> Result<(), Box<dyn Error>> {
+//!     // Establish TCP connection to the server.
+//!     let tcp = TcpStream::connect("127.0.0.1:5928").await?;
+//!     let (h2, connection) = client::handshake(tcp).await?;
+//!     tokio::spawn(async move {
+//!         connection.await.unwrap();
+//!     });
+//!
+//!     let mut h2 = h2.ready().await?;
+//!     // Prepare the HTTP request to send to the server.
+//!     let request = Request::builder()
+//!                     .method(Method::GET)
+//!                     .uri("https://www.example.com/")
+//!                     .body(())
+//!                     .unwrap();
+//!
+//!     // Send the request. The second tuple item allows the caller
+//!     // to stream a request body.
+//!     let (response, _) = h2.send_request(request, true).unwrap();
+//!
+//!     let (head, mut body) = response.await?.into_parts();
+//!
+//!     println!("Received response: {:?}", head);
+//!
+//!     // The `flow_control` handle allows the caller to manage
+//!     // flow control.
+//!     //
+//!     // Whenever data is received, the caller is responsible for
+//!     // releasing capacity back to the server once it has freed
+//!     // the data from memory.
+//!     let mut flow_control = body.flow_control().clone();
+//!
+//!     while let Some(chunk) = body.data().await {
+//!         let chunk = chunk?;
+//!         println!("RX: {:?}", chunk);
+//!
+//!         // Let the server send more data.
+//!         let _ = flow_control.release_capacity(chunk.len());
+//!     }
+//!
+//!     Ok(())
+//! }
+//! ```
+//!
+//! [`TcpStream`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpStream.html
+//! [`handshake`]: fn.handshake.html
+//! [executor]: https://docs.rs/futures/0.1/futures/future/trait.Executor.html
+//! [`SendRequest`]: struct.SendRequest.html
+//! [`SendStream`]: ../struct.SendStream.html
+//! [Making requests]: #making-requests
+//! [Managing the connection]: #managing-the-connection
+//! [`Connection`]: struct.Connection.html
+//! [`Connection::poll`]: struct.Connection.html#method.poll
+//! [`SendRequest::send_request`]: struct.SendRequest.html#method.send_request
+//! [`MAX_CONCURRENT_STREAMS`]: http://httpwg.org/specs/rfc7540.html#SettingValues
+//! [`SendRequest`]: struct.SendRequest.html
+//! [`ResponseFuture`]: struct.ResponseFuture.html
+//! [`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready
+//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
+//! [`Builder`]: struct.Builder.html
+//! [`Error`]: ../struct.Error.html
+
+use crate::codec::{Codec, SendError, UserError};
+use crate::ext::Protocol;
+use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId};
+use crate::proto::{self, Error};
+use crate::{FlowControl, PingPong, RecvStream, SendStream};
+
+use bytes::{Buf, Bytes};
+use http::{uri, HeaderMap, Method, Request, Response, Version};
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::time::Duration;
+use std::usize;
+use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
+use tracing::Instrument;
+
+/// Initializes new HTTP/2 streams on a connection by sending a request.
+///
+/// This type does no work itself. Instead, it is a handle to the inner
+/// connection state held by [`Connection`]. If the associated connection
+/// instance is dropped, all `SendRequest` functions will return [`Error`].
+///
+/// [`SendRequest`] instances can be moved to, and used from, tasks or threads
+/// other than the one driving their associated [`Connection`]. Internally, there
+/// is a buffer used to stage requests before they get written to the
+/// connection. There is no guarantee that requests get written to the
+/// connection in FIFO order as HTTP/2 prioritization logic can play a role.
+///
+/// [`SendRequest`] implements [`Clone`], enabling the creation of many
+/// instances that are backed by a single connection.
+///
+/// See [module] level documentation for more details.
+///
+/// [module]: index.html
+/// [`Connection`]: struct.Connection.html
+/// [`Clone`]: https://doc.rust-lang.org/std/clone/trait.Clone.html
+/// [`Error`]: ../struct.Error.html
+pub struct SendRequest<B: Buf> {
+    inner: proto::Streams<B, Peer>,
+    pending: Option<proto::OpaqueStreamRef>,
+}
+
+/// Returns a `SendRequest` instance once it is ready to send at least one
+/// request.
+#[derive(Debug)]
+pub struct ReadySendRequest<B: Buf> {
+    inner: Option<SendRequest<B>>,
+}
+
+/// Manages all state associated with an HTTP/2 client connection.
+///
+/// A `Connection` is backed by an I/O resource (usually a TCP socket) and
+/// implements the HTTP/2 client logic for that connection. It is responsible
+/// for driving the internal state forward, performing the work requested of the
+/// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`],
+/// [`RecvStream`]).
+///
+/// `Connection` values are created by calling [`handshake`]. Once a
+/// `Connection` value is obtained, the caller must repeatedly call [`poll`]
+/// until `Ready` is returned. The easiest way to do this is to submit the
+/// `Connection` instance to an [executor].
+///
+/// [module]: index.html
+/// [`handshake`]: fn.handshake.html
+/// [`SendRequest`]: struct.SendRequest.html
+/// [`ResponseFuture`]: struct.ResponseFuture.html
+/// [`SendStream`]: ../struct.SendStream.html
+/// [`RecvStream`]: ../struct.RecvStream.html
+/// [`poll`]: #method.poll
+/// [executor]: https://docs.rs/futures/0.1/futures/future/trait.Executor.html
+///
+/// # Examples
+///
+/// ```
+/// # use tokio::io::{AsyncRead, AsyncWrite};
+/// # use h2::client;
+/// # use h2::client::*;
+/// #
+/// # async fn doc<T>(my_io: T) -> Result<(), h2::Error>
+/// # where T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
+/// # {
+///     let (send_request, connection) = client::handshake(my_io).await?;
+///     // Submit the connection handle to an executor.
+///     tokio::spawn(async { connection.await.expect("connection failed"); });
+///
+///     // Now, use `send_request` to initialize HTTP/2 streams.
+///     // ...
+/// # Ok(())
+/// # }
+/// #
+/// # pub fn main() {}
+/// ```
+#[must_use = "futures do nothing unless polled"]
+pub struct Connection<T, B: Buf = Bytes> {
+    inner: proto::Connection<T, Peer, B>,
+}
+
+/// A future of an HTTP response.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct ResponseFuture {
+    inner: proto::OpaqueStreamRef,
+    push_promise_consumed: bool,
+}
+
+/// A future of a pushed HTTP response.
+///
+/// We have to differentiate between pushed and non-pushed responses because of
+/// the spec <https://httpwg.org/specs/rfc7540.html#PUSH_PROMISE>:
+/// > PUSH_PROMISE frames MUST only be sent on a peer-initiated stream
+/// > that is in either the "open" or "half-closed (remote)" state.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct PushedResponseFuture {
+    inner: ResponseFuture,
+}
+
+/// A pushed response and corresponding request headers
+#[derive(Debug)]
+pub struct PushPromise {
+    /// The request headers
+    request: Request<()>,
+
+    /// The pushed response
+    response: PushedResponseFuture,
+}
+
+/// A stream of pushed responses and corresponding promised requests
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct PushPromises {
+    inner: proto::OpaqueStreamRef,
+}
+
+/// Builds client connections with custom configuration values.
+///
+/// Methods can be chained in order to set the configuration values.
+///
+/// The client is constructed by calling [`handshake`] and passing the I/O
+/// handle that will back the HTTP/2 connection.
+///
+/// New instances of `Builder` are obtained via [`Builder::new`].
+///
+/// See function level documentation for details on the various client
+/// configuration settings.
+///
+/// [`Builder::new`]: struct.Builder.html#method.new
+/// [`handshake`]: struct.Builder.html#method.handshake
+///
+/// # Examples
+///
+/// ```
+/// # use tokio::io::{AsyncRead, AsyncWrite};
+/// # use h2::client::*;
+/// # use bytes::Bytes;
+/// #
+/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+///     -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+/// # {
+/// // `client_fut` is a future representing the completion of the HTTP/2
+/// // handshake.
+/// let client_fut = Builder::new()
+///     .initial_window_size(1_000_000)
+///     .max_concurrent_streams(1000)
+///     .handshake(my_io);
+/// # client_fut.await
+/// # }
+/// #
+/// # pub fn main() {}
+/// ```
+#[derive(Clone, Debug)]
+pub struct Builder {
+    /// Time to keep locally reset streams around before reaping.
+    reset_stream_duration: Duration,
+
+    /// Initial maximum number of locally initiated (send) streams.
+    /// After receiving a SETTINGS frame from the remote peer,
+    /// the connection will overwrite this value with the
+    /// MAX_CONCURRENT_STREAMS specified in the frame.
+    /// If no value is advertised by the remote peer in the initial SETTINGS
+    /// frame, it will be set to usize::MAX.
+    initial_max_send_streams: usize,
+
+    /// Initial target window size for new connections.
+    initial_target_connection_window_size: Option<u32>,
+
+    /// Maximum amount of bytes to "buffer" for writing per stream.
+    max_send_buffer_size: usize,
+
+    /// Maximum number of locally reset streams to keep at a time.
+    reset_stream_max: usize,
+
+    /// Maximum number of remotely reset streams to allow in the pending
+    /// accept queue.
+    pending_accept_reset_stream_max: usize,
+
+    /// Initial `Settings` frame to send as part of the handshake.
+    settings: Settings,
+
+    /// The stream ID of the first (lowest) stream. Subsequent streams will use
+    /// monotonically increasing stream IDs.
+    stream_id: StreamId,
+
+    /// Maximum number of locally reset streams due to protocol error across
+    /// the lifetime of the connection.
+    ///
+    /// When this gets exceeded, we issue GOAWAYs.
+    local_max_error_reset_streams: Option<usize>,
+}
+
+#[derive(Debug)]
+pub(crate) struct Peer;
+
+// ===== impl SendRequest =====
+
+impl<B> SendRequest<B>
+where
+    B: Buf,
+{
+    /// Returns `Ready` when the connection can initialize a new HTTP/2
+    /// stream.
+    ///
+    /// This function must return `Ready` before `send_request` is called. When
+    /// `Poll::Pending` is returned, the task will be notified once the readiness
+    /// state changes.
+    ///
+    /// See [module] level docs for more details.
+    ///
+    /// [module]: index.html
+    pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), crate::Error>> {
+        ready!(self.inner.poll_pending_open(cx, self.pending.as_ref()))?;
+        self.pending = None;
+        Poll::Ready(Ok(()))
+    }
+
+    /// Consumes `self`, returning a future that returns `self` back once it is
+    /// ready to send a request.
+    ///
+    /// This function should be called before calling `send_request`.
+    ///
+    /// This is a functional combinator for [`poll_ready`]. The returned future
+    /// will call `SendRequest::poll_ready` until `Ready`, then returns `self` to
+    /// the caller.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// # use h2::client::*;
+    /// # use http::*;
+    /// # async fn doc(send_request: SendRequest<&'static [u8]>)
+    /// # {
+    /// // First, wait until the `send_request` handle is ready to send a new
+    /// // request
+    /// let mut send_request = send_request.ready().await.unwrap();
+    /// // Use `send_request` here.
+    /// # }
+    /// # pub fn main() {}
+    /// ```
+    ///
+    /// See [module] level docs for more details.
+    ///
+    /// [`poll_ready`]: #method.poll_ready
+    /// [module]: index.html
+    pub fn ready(self) -> ReadySendRequest<B> {
+        ReadySendRequest { inner: Some(self) }
+    }
+
+    /// Sends an HTTP/2 request to the server.
+    ///
+    /// `send_request` initializes a new HTTP/2 stream on the associated
+    /// connection, then sends the given request using this new stream. Only the
+    /// request head is sent.
+    ///
+    /// On success, a [`ResponseFuture`] instance and [`SendStream`] instance
+    /// are returned. The [`ResponseFuture`] instance is used to get the
+    /// server's response and the [`SendStream`] instance is used to send a
+    /// request body or trailers to the server over the same HTTP/2 stream.
+    ///
+    /// To send a request body or trailers, set `end_of_stream` to `false`.
+    /// Then, use the returned [`SendStream`] instance to stream request body
+    /// chunks or send trailers. If `end_of_stream` is set to `true`, then
+    /// attempting to call [`SendStream::send_data`] or
+    /// [`SendStream::send_trailers`] will result in an error.
+    ///
+    /// If no request body or trailers are to be sent, set `end_of_stream` to
+    /// `true` and drop the returned [`SendStream`] instance.
+    ///
+    /// # A note on HTTP versions
+    ///
+    /// The provided `Request` will be encoded differently depending on the
+    /// value of its version field. If the version is set to 2.0, then the
+    /// request is encoded as per the specification recommends.
+    ///
+    /// If the version is set to a lower value, then the request is encoded to
+    /// preserve the characteristics of HTTP 1.1 and lower. Specifically, host
+    /// headers are permitted and the `:authority` pseudo header is not
+    /// included.
+    ///
+    /// The caller should always set the request's version field to 2.0 unless
+    /// specifically transmitting an HTTP 1.1 request over 2.0.
+    ///
+    /// # Examples
+    ///
+    /// Sending a request with no body
+    ///
+    /// ```rust
+    /// # use h2::client::*;
+    /// # use http::*;
+    /// # async fn doc(send_request: SendRequest<&'static [u8]>)
+    /// # {
+    /// // First, wait until the `send_request` handle is ready to send a new
+    /// // request
+    /// let mut send_request = send_request.ready().await.unwrap();
+    /// // Prepare the HTTP request to send to the server.
+    /// let request = Request::get("https://www.example.com/")
+    ///     .body(())
+    ///     .unwrap();
+    ///
+    /// // Send the request to the server. Since we are not sending a
+    /// // body or trailers, we can drop the `SendStream` instance.
+    /// let (response, _) = send_request.send_request(request, true).unwrap();
+    /// let response = response.await.unwrap();
+    /// // Process the response
+    /// # }
+    /// # pub fn main() {}
+    /// ```
+    ///
+    /// Sending a request with a body and trailers
+    ///
+    /// ```rust
+    /// # use h2::client::*;
+    /// # use http::*;
+    /// # async fn doc(send_request: SendRequest<&'static [u8]>)
+    /// # {
+    /// // First, wait until the `send_request` handle is ready to send a new
+    /// // request
+    /// let mut send_request = send_request.ready().await.unwrap();
+    ///
+    /// // Prepare the HTTP request to send to the server.
+    /// let request = Request::get("https://www.example.com/")
+    ///     .body(())
+    ///     .unwrap();
+    ///
+    /// // Send the request to the server. Since we will also send a body
+    /// // and trailers, we keep the returned `SendStream` instance.
+    /// let (response, mut send_stream) = send_request
+    ///     .send_request(request, false).unwrap();
+    ///
+    /// // At this point, one option would be to wait for send capacity.
+    /// // Doing so would allow us to not hold data in memory that
+    /// // cannot be sent. However, this is not a requirement, so this
+    /// // example will skip that step. See `SendStream` documentation
+    /// // for more details.
+    /// send_stream.send_data(b"hello", false).unwrap();
+    /// send_stream.send_data(b"world", false).unwrap();
+    ///
+    /// // Send the trailers.
+    /// let mut trailers = HeaderMap::new();
+    /// trailers.insert(
+    ///     header::HeaderName::from_bytes(b"my-trailer").unwrap(),
+    ///     header::HeaderValue::from_bytes(b"hello").unwrap());
+    ///
+    /// send_stream.send_trailers(trailers).unwrap();
+    ///
+    /// let response = response.await.unwrap();
+    /// // Process the response
+    /// # }
+    /// # pub fn main() {}
+    /// ```
+    ///
+    /// [`ResponseFuture`]: struct.ResponseFuture.html
+    /// [`SendStream`]: ../struct.SendStream.html
+    /// [`SendStream::send_data`]: ../struct.SendStream.html#method.send_data
+    /// [`SendStream::send_trailers`]: ../struct.SendStream.html#method.send_trailers
+    pub fn send_request(
+        &mut self,
+        request: Request<()>,
+        end_of_stream: bool,
+    ) -> Result<(ResponseFuture, SendStream<B>), crate::Error> {
+        self.inner
+            .send_request(request, end_of_stream, self.pending.as_ref())
+            .map_err(Into::into)
+            .map(|(stream, is_full)| {
+                if stream.is_pending_open() && is_full {
+                    // Only prevent sending another request when the request
+                    // queue is full.
+                    self.pending = Some(stream.clone_to_opaque());
+                }
+
+                let response = ResponseFuture {
+                    inner: stream.clone_to_opaque(),
+                    push_promise_consumed: false,
+                };
+
+                let stream = SendStream::new(stream);
+
+                (response, stream)
+            })
+    }
+
+    /// Returns whether the [extended CONNECT protocol][1] is enabled or not.
+    ///
+    /// This setting is configured by the server peer by sending the
+    /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame.
+    /// This method returns the currently acknowledged value received from the
+    /// remote.
+    ///
+    /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
+    /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3
+    pub fn is_extended_connect_protocol_enabled(&self) -> bool {
+        self.inner.is_extended_connect_protocol_enabled()
+    }
+}
+
+impl<B> fmt::Debug for SendRequest<B>
+where
+    B: Buf,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("SendRequest").finish()
+    }
+}
+
+impl<B> Clone for SendRequest<B>
+where
+    B: Buf,
+{
+    fn clone(&self) -> Self {
+        SendRequest {
+            inner: self.inner.clone(),
+            pending: None,
+        }
+    }
+}
+
+#[cfg(feature = "unstable")]
+impl<B> SendRequest<B>
+where
+    B: Buf,
+{
+    /// Returns the number of active streams.
+    ///
+    /// An active stream is a stream that has not yet transitioned to a closed
+    /// state.
+    pub fn num_active_streams(&self) -> usize {
+        self.inner.num_active_streams()
+    }
+
+    /// Returns the number of streams that are held in memory.
+    ///
+    /// A wired stream is a stream that is either active or is closed but must
+    /// stay in memory for some reason. For example, there are still outstanding
+    /// userspace handles pointing to the slot.
+    pub fn num_wired_streams(&self) -> usize {
+        self.inner.num_wired_streams()
+    }
+}
+
+// ===== impl ReadySendRequest =====
+
+impl<B> Future for ReadySendRequest<B>
+where
+    B: Buf,
+{
+    type Output = Result<SendRequest<B>, crate::Error>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        match &mut self.inner {
+            Some(send_request) => {
+                ready!(send_request.poll_ready(cx))?;
+            }
+            None => panic!("called `poll` after future completed"),
+        }
+
+        Poll::Ready(Ok(self.inner.take().unwrap()))
+    }
+}
+
+// ===== impl Builder =====
+
+impl Builder {
+    /// Returns a new client builder instance initialized with default
+    /// configuration values.
+    ///
+    /// Configuration methods can be chained on the return value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .initial_window_size(1_000_000)
+    ///     .max_concurrent_streams(1000)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn new() -> Builder {
+        Builder {
+            max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE,
+            reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS),
+            reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX,
+            pending_accept_reset_stream_max: proto::DEFAULT_REMOTE_RESET_STREAM_MAX,
+            initial_target_connection_window_size: None,
+            initial_max_send_streams: usize::MAX,
+            settings: Default::default(),
+            stream_id: 1.into(),
+            local_max_error_reset_streams: Some(proto::DEFAULT_LOCAL_RESET_COUNT_MAX),
+        }
+    }
+
+    /// Indicates the initial window size (in octets) for stream-level
+    /// flow control for received data.
+    ///
+    /// The initial window of a stream is used as part of flow control. For more
+    /// details, see [`FlowControl`].
+    ///
+    /// The default value is 65,535.
+    ///
+    /// [`FlowControl`]: ../struct.FlowControl.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .initial_window_size(1_000_000)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn initial_window_size(&mut self, size: u32) -> &mut Self {
+        self.settings.set_initial_window_size(Some(size));
+        self
+    }
+
+    /// Indicates the initial window size (in octets) for connection-level flow control
+    /// for received data.
+    ///
+    /// The initial window of a connection is used as part of flow control. For more details,
+    /// see [`FlowControl`].
+    ///
+    /// The default value is 65,535.
+    ///
+    /// [`FlowControl`]: ../struct.FlowControl.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .initial_connection_window_size(1_000_000)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn initial_connection_window_size(&mut self, size: u32) -> &mut Self {
+        self.initial_target_connection_window_size = Some(size);
+        self
+    }
+
+    /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the
+    /// configured client is able to accept.
+    ///
+    /// The sender may send data frames that are **smaller** than this value,
+    /// but any data larger than `max` will be broken up into multiple `DATA`
+    /// frames.
+    ///
+    /// The value **must** be between 16,384 and 16,777,215. The default value is 16,384.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .max_frame_size(1_000_000)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if `max` is not within the legal range specified
+    /// above.
+    pub fn max_frame_size(&mut self, max: u32) -> &mut Self {
+        self.settings.set_max_frame_size(Some(max));
+        self
+    }
+
+    /// Sets the max size of received header frames.
+    ///
+    /// This advisory setting informs a peer of the maximum size of header list
+    /// that the sender is prepared to accept, in octets. The value is based on
+    /// the uncompressed size of header fields, including the length of the name
+    /// and value in octets plus an overhead of 32 octets for each header field.
+    ///
+    /// This setting is also used to limit the maximum amount of data that is
+    /// buffered to decode HEADERS frames.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .max_header_list_size(16 * 1024)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
+        self.settings.set_max_header_list_size(Some(max));
+        self
+    }
+
+    /// Sets the maximum number of concurrent streams.
+    ///
+    /// The maximum concurrent streams setting only controls the maximum number
+    /// of streams that can be initiated by the remote peer. In other words,
+    /// when this setting is set to 100, this does not limit the number of
+    /// concurrent streams that can be created by the caller.
+    ///
+    /// It is recommended that this value be no smaller than 100, so as to not
+    /// unnecessarily limit parallelism. However, any value is legal, including
+    /// 0. If `max` is set to 0, then the remote will not be permitted to
+    /// initiate streams.
+    ///
+    /// Note that streams in the reserved state, i.e., push promises that have
+    /// been reserved but the stream has not started, do not count against this
+    /// setting.
+    ///
+    /// Also note that if the remote *does* exceed the value set here, it is not
+    /// a protocol level error. Instead, the `h2` library will immediately reset
+    /// the stream.
+    ///
+    /// See [Section 5.1.2] in the HTTP/2 spec for more details.
+    ///
+    /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .max_concurrent_streams(1000)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn max_concurrent_streams(&mut self, max: u32) -> &mut Self {
+        self.settings.set_max_concurrent_streams(Some(max));
+        self
+    }
+
+    /// Sets the initial maximum of locally initiated (send) streams.
+    ///
+    /// The initial settings will be overwritten by the remote peer when
+    /// the SETTINGS frame is received. The new value will be set to the
+    /// `max_concurrent_streams()` from the frame. If no value is advertised in
+    /// the initial SETTINGS frame from the remote peer as part of
+    /// [HTTP/2 Connection Preface], `usize::MAX` will be set.
+    ///
+    /// This setting prevents the caller from opening more than this number of
+    /// streams that count towards the concurrency limit.
+    ///
+    /// Sending streams past the limit returned by the peer will be treated
+    /// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM.
+    ///
+    /// See [Section 5.1.2] in the HTTP/2 spec for more details.
+    ///
+    /// The default value is `usize::MAX`.
+    ///
+    /// [HTTP/2 Connection Preface]: https://httpwg.org/specs/rfc9113.html#preface
+    /// [Section 5.1.2]: https://httpwg.org/specs/rfc9113.html#rfc.section.5.1.2
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .initial_max_send_streams(1000)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn initial_max_send_streams(&mut self, initial: usize) -> &mut Self {
+        self.initial_max_send_streams = initial;
+        self
+    }
+
+    /// Sets the maximum number of concurrent locally reset streams.
+    ///
+    /// When a stream is explicitly reset, the HTTP/2 specification requires
+    /// that any further frames received for that stream must be ignored for
+    /// "some time".
+    ///
+    /// In order to satisfy the specification, internal state must be maintained
+    /// to implement the behavior. This state grows linearly with the number of
+    /// streams that are locally reset.
+    ///
+    /// The `max_concurrent_reset_streams` setting configures an upper
+    /// bound on the amount of state that is maintained. When this max value is
+    /// reached, the oldest reset stream is purged from memory.
+    ///
+    /// Once the stream has been fully purged from memory, any additional frames
+    /// received for that stream will result in a connection level protocol
+    /// error, forcing the connection to terminate.
+    ///
+    /// The default value is 10.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .max_concurrent_reset_streams(1000)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
+        self.reset_stream_max = max;
+        self
+    }
+
+    /// Sets the duration to remember locally reset streams.
+    ///
+    /// When a stream is explicitly reset, the HTTP/2 specification requires
+    /// that any further frames received for that stream must be ignored for
+    /// "some time".
+    ///
+    /// In order to satisfy the specification, internal state must be maintained
+    /// to implement the behavior. This state grows linearly with the number of
+    /// streams that are locally reset.
+    ///
+    /// The `reset_stream_duration` setting configures the max amount of time
+    /// this state will be maintained in memory. Once the duration elapses, the
+    /// stream state is purged from memory.
+    ///
+    /// Once the stream has been fully purged from memory, any additional frames
+    /// received for that stream will result in a connection level protocol
+    /// error, forcing the connection to terminate.
+    ///
+    /// The default value is 30 seconds.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use std::time::Duration;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .reset_stream_duration(Duration::from_secs(10))
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn reset_stream_duration(&mut self, dur: Duration) -> &mut Self {
+        self.reset_stream_duration = dur;
+        self
+    }
+
+    /// Sets the maximum number of local resets due to protocol errors made by the remote end.
+    ///
+    /// Invalid frames and many other protocol errors will lead to resets being generated for those streams.
+    /// Too many of these often indicate a malicious client, and there are attacks which can abuse this to DoS servers.
+    /// This limit protects against these DoS attacks by limiting the number of resets we can be forced to generate.
+    ///
+    /// When the number of local resets exceeds this threshold, the client will close the connection.
+    ///
+    /// If you really want to disable this, supply [`Option::None`] here.
+    /// Disabling this is not recommended and may expose you to DoS attacks.
+    ///
+    /// The default value is currently 1024, but could change.
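+    ///
+    /// # Examples
+    ///
+    /// A sketch following the other builder examples; the limit used here is
+    /// only an illustrative value.
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .max_local_error_reset_streams(Some(1024))
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```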
+    pub fn max_local_error_reset_streams(&mut self, max: Option<usize>) -> &mut Self {
+        self.local_max_error_reset_streams = max;
+        self
+    }
+
+    /// Sets the maximum number of pending-accept remotely-reset streams.
+    ///
+    /// Streams that have been received by the peer, but not accepted by the
+    /// user, can also receive a RST_STREAM. This is a legitimate pattern: one
+    /// could send a request and then shortly after, realize it is not needed,
+    /// sending a CANCEL.
+    ///
+    /// However, since those streams are now "closed", they don't count towards
+    /// the max concurrent streams. So, they will sit in the accept queue,
+    /// using memory.
+    ///
+    /// When the number of remotely-reset streams sitting in the pending-accept
+    /// queue reaches this maximum value, a connection error with the code of
+    /// `ENHANCE_YOUR_CALM` will be sent to the peer, and returned by the
+    /// `Future`.
+    ///
+    /// The default value is currently 20, but could change.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .max_pending_accept_reset_streams(100)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn max_pending_accept_reset_streams(&mut self, max: usize) -> &mut Self {
+        self.pending_accept_reset_stream_max = max;
+        self
+    }
+
+    /// Sets the maximum send buffer size per stream.
+    ///
+    /// Once a stream has buffered up to (or over) the maximum, the stream's
+    /// flow control will not "poll" additional capacity. Once bytes for the
+    /// stream have been written to the connection, the send buffer capacity
+    /// will be freed up again.
+    ///
+    /// The default is currently ~400KB, but may change.
+    ///
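+    /// # Examples
+    ///
+    /// A sketch following the other builder examples; the buffer size used
+    /// here is only an illustrative value.
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .max_send_buffer_size(1024 * 1024)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    ///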
+    /// # Panics
+    ///
+    /// This function panics if `max` is larger than `u32::MAX`.
+    pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self {
+        assert!(max <= std::u32::MAX as usize);
+        self.max_send_buffer_size = max;
+        self
+    }
+
+    /// Enables or disables server push promises.
+    ///
+    /// This value is included in the initial SETTINGS handshake.
+    /// Setting this value to
+    /// false in the initial SETTINGS handshake guarantees that the remote server
+    /// will never send a push promise.
+    ///
+    /// This setting can be changed during the life of a single HTTP/2
+    /// connection by sending another settings frame updating the value.
+    ///
+    /// Default value: `true`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use std::time::Duration;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .enable_push(false)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn enable_push(&mut self, enabled: bool) -> &mut Self {
+        self.settings.set_enable_push(enabled);
+        self
+    }
+
+    /// Sets the header table size.
+    ///
+    /// This setting informs the peer of the maximum size of the header compression
+    /// table used to encode header blocks, in octets. The encoder may select any value
+    /// equal to or less than the header table size specified by the sender.
+    ///
+    /// The default value is 4,096.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .header_table_size(1_000_000)
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn header_table_size(&mut self, size: u32) -> &mut Self {
+        self.settings.set_header_table_size(Some(size));
+        self
+    }
+
+    /// Sets the first stream ID to something other than 1.
+    #[cfg(feature = "unstable")]
+    pub fn initial_stream_id(&mut self, stream_id: u32) -> &mut Self {
+        self.stream_id = stream_id.into();
+        assert!(
+            self.stream_id.is_client_initiated(),
+            "stream id must be odd"
+        );
+        self
+    }
+
+    /// Creates a new configured HTTP/2 client backed by `io`.
+    ///
+    /// It is expected that `io` already be in an appropriate state to commence
+    /// the [HTTP/2 handshake]. The handshake is completed once both the connection
+    /// preface and the initial settings frame are sent by the client.
+    ///
+    /// The handshake future does not wait for the initial settings frame from the
+    /// server.
+    ///
+    /// Returns a future which resolves to the [`Connection`] / [`SendRequest`]
+    /// tuple once the HTTP/2 handshake has been completed.
+    ///
+    /// This function also allows the caller to configure the send payload data
+    /// type. See [Outbound data type] for more details.
+    ///
+    /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
+    /// [`Connection`]: struct.Connection.html
+    /// [`SendRequest`]: struct.SendRequest.html
+    /// [Outbound data type]: ../index.html#outbound-data-type
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    ///     -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .handshake(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    ///
+    /// Configures the send-payload data type. In this case, the outbound data
+    /// type will be `&'static [u8]`.
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Result<((SendRequest<&'static [u8]>, Connection<T, &'static [u8]>)), h2::Error>
+    /// # {
+    /// // `client_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let client_fut = Builder::new()
+    ///     .handshake::<_, &'static [u8]>(my_io);
+    /// # client_fut.await
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn handshake<T, B>(
+        &self,
+        io: T,
+    ) -> impl Future<Output = Result<(SendRequest<B>, Connection<T, B>), crate::Error>>
+    where
+        T: AsyncRead + AsyncWrite + Unpin,
+        B: Buf,
+    {
+        Connection::handshake2(io, self.clone())
+    }
+}
+
+impl Default for Builder {
+    fn default() -> Builder {
+        Builder::new()
+    }
+}
+
+/// Creates a new configured HTTP/2 client with default configuration
+/// values backed by `io`.
+///
+/// It is expected that `io` already be in an appropriate state to commence
+/// the [HTTP/2 handshake]. See [Handshake] for more details.
+///
+/// Returns a future which resolves to the [`Connection`] / [`SendRequest`]
+/// tuple once the HTTP/2 handshake has been completed. The returned
+/// [`Connection`] instance will be using default configuration values. Use
+/// [`Builder`] to customize the configuration values used by a [`Connection`]
+/// instance.
+///
+/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
+/// [Handshake]: ../index.html#handshake
+/// [`Connection`]: struct.Connection.html
+/// [`SendRequest`]: struct.SendRequest.html
+///
+/// # Examples
+///
+/// ```
+/// # use tokio::io::{AsyncRead, AsyncWrite};
+/// # use h2::client;
+/// # use h2::client::*;
+/// #
+/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) -> Result<(), h2::Error>
+/// # {
+/// let (send_request, connection) = client::handshake(my_io).await?;
+/// // The HTTP/2 handshake has completed, now start polling
+/// // `connection` and use `send_request` to send requests to the
+/// // server.
+/// # Ok(())
+/// # }
+/// #
+/// # pub fn main() {}
+/// ```
+pub async fn handshake<T>(io: T) -> Result<(SendRequest<Bytes>, Connection<T, Bytes>), crate::Error>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+{
+    let builder = Builder::new();
+    builder
+        .handshake(io)
+        .instrument(tracing::trace_span!("client_handshake"))
+        .await
+}
+
+// ===== impl Connection =====
+
+async fn bind_connection<T>(io: &mut T) -> Result<(), crate::Error>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+{
+    tracing::debug!("binding client connection");
+
+    let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
+    io.write_all(msg).await.map_err(crate::Error::from_io)?;
+
+    tracing::debug!("client connection bound");
+
+    Ok(())
+}
+
+impl<T, B> Connection<T, B>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+    B: Buf,
+{
+    async fn handshake2(
+        mut io: T,
+        builder: Builder,
+    ) -> Result<(SendRequest<B>, Connection<T, B>), crate::Error> {
+        bind_connection(&mut io).await?;
+
+        // Create the codec
+        let mut codec = Codec::new(io);
+
+        if let Some(max) = builder.settings.max_frame_size() {
+            codec.set_max_recv_frame_size(max as usize);
+        }
+
+        if let Some(max) = builder.settings.max_header_list_size() {
+            codec.set_max_recv_header_list_size(max as usize);
+        }
+
+        // Send initial settings frame
+        codec
+            .buffer(builder.settings.clone().into())
+            .expect("invalid SETTINGS frame");
+
+        let inner = proto::Connection::new(
+            codec,
+            proto::Config {
+                next_stream_id: builder.stream_id,
+                initial_max_send_streams: builder.initial_max_send_streams,
+                max_send_buffer_size: builder.max_send_buffer_size,
+                reset_stream_duration: builder.reset_stream_duration,
+                reset_stream_max: builder.reset_stream_max,
+                remote_reset_stream_max: builder.pending_accept_reset_stream_max,
+                local_error_reset_streams_max: builder.local_max_error_reset_streams,
+                settings: builder.settings.clone(),
+            },
+        );
+        let send_request = SendRequest {
+            inner: inner.streams().clone(),
+            pending: None,
+        };
+
+        let mut connection = Connection { inner };
+        if let Some(sz) = builder.initial_target_connection_window_size {
+            connection.set_target_window_size(sz);
+        }
+
+        Ok((send_request, connection))
+    }
+
+    /// Sets the target window size for the whole connection.
+    ///
+    /// If `size` is greater than the current value, then a `WINDOW_UPDATE`
+    /// frame will be immediately sent to the remote, increasing the connection
+    /// level window by `size - current_value`.
+    ///
+    /// If `size` is less than the current value, nothing will happen
+    /// immediately. However, as window capacity is released by
+    /// [`FlowControl`] instances, no `WINDOW_UPDATE` frames will be sent
+    /// out until the number of "in flight" bytes drops below `size`.
+    ///
+    /// The default value is 65,535.
+    ///
+    /// See [`FlowControl`] documentation for more details.
+    ///
+    /// [`FlowControl`]: ../struct.FlowControl.html
+    /// [library level]: ../index.html#flow-control
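+    ///
+    /// # Examples
+    ///
+    /// A sketch of growing the connection-level window on an established
+    /// connection; the target used here is only an illustrative value.
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(mut connection: Connection<T, Bytes>) {
+    /// connection.set_target_window_size(1_000_000);
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```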
+    pub fn set_target_window_size(&mut self, size: u32) {
+        assert!(size <= proto::MAX_WINDOW_SIZE);
+        self.inner.set_target_window_size(size);
+    }
+
+    /// Set a new `INITIAL_WINDOW_SIZE` setting (in octets) for stream-level
+    /// flow control for received data.
+    ///
+    /// The `SETTINGS` will be sent to the remote, and only applied once the
+    /// remote acknowledges the change.
+    ///
+    /// This can be used to increase or decrease the window size for existing
+    /// streams.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if a previous call is still pending acknowledgement
+    /// from the remote endpoint.
+    pub fn set_initial_window_size(&mut self, size: u32) -> Result<(), crate::Error> {
+        assert!(size <= proto::MAX_WINDOW_SIZE);
+        self.inner.set_initial_window_size(size)?;
+        Ok(())
+    }
+
+    /// Takes a `PingPong` instance from the connection.
+    ///
+    /// # Note
+    ///
+    /// This may only be called once. Calling multiple times will return `None`.
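+    ///
+    /// # Examples
+    ///
+    /// A sketch showing that only the first call yields a `PingPong` handle:
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::client::*;
+    /// # use bytes::Bytes;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(mut connection: Connection<T, Bytes>) {
+    /// let ping_pong = connection.ping_pong();
+    /// assert!(ping_pong.is_some());
+    /// assert!(connection.ping_pong().is_none());
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```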
+    pub fn ping_pong(&mut self) -> Option<PingPong> {
+        self.inner.take_user_pings().map(PingPong::new)
+    }
+
+    /// Returns the maximum number of concurrent streams that may be initiated
+    /// by this client.
+    ///
+    /// This limit is configured by the server peer by sending the
+    /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame.
+    /// This method returns the currently acknowledged value received from the
+    /// remote.
+    ///
+    /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2
+    pub fn max_concurrent_send_streams(&self) -> usize {
+        self.inner.max_send_streams()
+    }
+    /// Returns the maximum number of concurrent streams that may be initiated
+    /// by the server on this connection.
+    ///
+    /// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS`
+    /// parameter][1] sent in a `SETTINGS` frame that has been
+    /// acknowledged by the remote peer. The value to be sent is configured by
+    /// the [`Builder::max_concurrent_streams`][2] method before handshaking
+    /// with the remote peer.
+    ///
+    /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2
+    /// [2]: ../struct.Builder.html#method.max_concurrent_streams
+    pub fn max_concurrent_recv_streams(&self) -> usize {
+        self.inner.max_recv_streams()
+    }
+}
+
+impl<T, B> Future for Connection<T, B>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+    B: Buf,
+{
+    type Output = Result<(), crate::Error>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        self.inner.maybe_close_connection_if_no_streams();
+        self.inner.poll(cx).map_err(Into::into)
+    }
+}
+
+impl<T, B> fmt::Debug for Connection<T, B>
+where
+    T: AsyncRead + AsyncWrite,
+    T: fmt::Debug,
+    B: fmt::Debug + Buf,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&self.inner, fmt)
+    }
+}
+
+// ===== impl ResponseFuture =====
+
+impl Future for ResponseFuture {
+    type Output = Result<Response<RecvStream>, crate::Error>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let (parts, _) = ready!(self.inner.poll_response(cx))?.into_parts();
+        let body = RecvStream::new(FlowControl::new(self.inner.clone()));
+
+        Poll::Ready(Ok(Response::from_parts(parts, body)))
+    }
+}
+
+impl ResponseFuture {
+    /// Returns the stream ID of the response stream.
+    ///
+    /// # Panics
+    ///
+    /// If the lock on the stream store has been poisoned.
+    pub fn stream_id(&self) -> crate::StreamId {
+        crate::StreamId::from_internal(self.inner.stream_id())
+    }
+    /// Returns a stream of PushPromises
+    ///
+    /// # Panics
+    ///
+    /// If this method has been called before,
+    /// or if the stream itself was pushed.
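+    ///
+    /// # Examples
+    ///
+    /// A sketch that drains pushed requests from a response future; the
+    /// printed output is only for illustration.
+    ///
+    /// ```
+    /// # use h2::client::*;
+    /// #
+    /// # async fn doc(mut response: ResponseFuture) -> Result<(), h2::Error> {
+    /// let mut pushes = response.push_promises();
+    /// while let Some(push) = pushes.push_promise().await {
+    ///     let (request, _pushed_response) = push?.into_parts();
+    ///     println!("pushed request: {:?}", request);
+    /// }
+    /// # Ok(())
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```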
+    pub fn push_promises(&mut self) -> PushPromises {
+        if self.push_promise_consumed {
+            panic!("Reference to push promises stream taken!");
+        }
+        self.push_promise_consumed = true;
+        PushPromises {
+            inner: self.inner.clone(),
+        }
+    }
+}
+
+// ===== impl PushPromises =====
+
+impl PushPromises {
+    /// Get the next `PushPromise`.
+    pub async fn push_promise(&mut self) -> Option<Result<PushPromise, crate::Error>> {
+        futures_util::future::poll_fn(move |cx| self.poll_push_promise(cx)).await
+    }
+
+    #[doc(hidden)]
+    pub fn poll_push_promise(
+        &mut self,
+        cx: &mut Context<'_>,
+    ) -> Poll<Option<Result<PushPromise, crate::Error>>> {
+        match self.inner.poll_pushed(cx) {
+            Poll::Ready(Some(Ok((request, response)))) => {
+                let response = PushedResponseFuture {
+                    inner: ResponseFuture {
+                        inner: response,
+                        push_promise_consumed: false,
+                    },
+                };
+                Poll::Ready(Some(Ok(PushPromise { request, response })))
+            }
+            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))),
+            Poll::Ready(None) => Poll::Ready(None),
+            Poll::Pending => Poll::Pending,
+        }
+    }
+}
+
+#[cfg(feature = "stream")]
+impl futures_core::Stream for PushPromises {
+    type Item = Result<PushPromise, crate::Error>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.poll_push_promise(cx)
+    }
+}
+
+// ===== impl PushPromise =====
+
+impl PushPromise {
+    /// Returns a reference to the push promise's request headers.
+    pub fn request(&self) -> &Request<()> {
+        &self.request
+    }
+
+    /// Returns a mutable reference to the push promise's request headers.
+    pub fn request_mut(&mut self) -> &mut Request<()> {
+        &mut self.request
+    }
+
+    /// Consumes `self`, returning the push promise's request headers and
+    /// response future.
+    pub fn into_parts(self) -> (Request<()>, PushedResponseFuture) {
+        (self.request, self.response)
+    }
+}
+
+// ===== impl PushedResponseFuture =====
+
+impl Future for PushedResponseFuture {
+    type Output = Result<Response<RecvStream>, crate::Error>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        Pin::new(&mut self.inner).poll(cx)
+    }
+}
+
+impl PushedResponseFuture {
+    /// Returns the stream ID of the response stream.
+    ///
+    /// # Panics
+    ///
+    /// If the lock on the stream store has been poisoned.
+    pub fn stream_id(&self) -> crate::StreamId {
+        self.inner.stream_id()
+    }
+}
+
+// ===== impl Peer =====
+
+impl Peer {
+    pub fn convert_send_message(
+        id: StreamId,
+        request: Request<()>,
+        protocol: Option<Protocol>,
+        end_of_stream: bool,
+    ) -> Result<Headers, SendError> {
+        use http::request::Parts;
+
+        let (
+            Parts {
+                method,
+                uri,
+                headers,
+                version,
+                ..
+            },
+            _,
+        ) = request.into_parts();
+
+        let is_connect = method == Method::CONNECT;
+
+        // Build the pseudo header set. All requests will include `method`
+        // and `path`.
+        let mut pseudo = Pseudo::request(method, uri, protocol);
+
+        if pseudo.scheme.is_none() {
+            // If the scheme is not set, then there are two options.
+            //
+            // 1) Authority is not set. In this case, a request was issued with
+            //    a relative URI. This is permitted **only** when forwarding
+            //    HTTP 1.x requests. If the HTTP version is set to 2.0, then
+            //    this is an error.
+            //
+            // 2) Authority is set, then the HTTP method *must* be CONNECT.
+            //
+            // It is not possible to have a scheme but not an authority set (the
+            // `http` crate does not allow it).
+            //
+            if pseudo.authority.is_none() {
+                if version == Version::HTTP_2 {
+                    return Err(UserError::MissingUriSchemeAndAuthority.into());
+                } else {
+                    // This is acceptable as per the above comment. However,
+                    // HTTP/2 requires that a scheme is set. Since we are
+                    // forwarding an HTTP 1.1 request, the scheme is set to
+                    // "http".
+                    pseudo.set_scheme(uri::Scheme::HTTP);
+                }
+            } else if !is_connect {
+                // TODO: Error
+            }
+        }
+
+        // Create the HEADERS frame
+        let mut frame = Headers::new(id, pseudo, headers);
+
+        if end_of_stream {
+            frame.set_end_stream()
+        }
+
+        Ok(frame)
+    }
+}
+
+impl proto::Peer for Peer {
+    type Poll = Response<()>;
+
+    const NAME: &'static str = "Client";
+
+    fn r#dyn() -> proto::DynPeer {
+        proto::DynPeer::Client
+    }
+
+    /*
+    fn is_server() -> bool {
+        false
+    }
+    */
+
+    fn convert_poll_message(
+        pseudo: Pseudo,
+        fields: HeaderMap,
+        stream_id: StreamId,
+    ) -> Result<Self::Poll, Error> {
+        let mut b = Response::builder();
+
+        b = b.version(Version::HTTP_2);
+
+        if let Some(status) = pseudo.status {
+            b = b.status(status);
+        }
+
+        let mut response = match b.body(()) {
+            Ok(response) => response,
+            Err(_) => {
+                // TODO: Should there be more specialized handling for different
+                // kinds of errors
+                return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR));
+            }
+        };
+
+        *response.headers_mut() = fields;
+
+        Ok(response)
+    }
+}
diff --git a/src/codec/error.rs b/src/codec/error.rs
new file mode 100644
index 0000000..0acb913
--- /dev/null
+++ b/src/codec/error.rs
@@ -0,0 +1,102 @@
+use crate::proto::Error;
+
+use std::{error, fmt, io};
+
+/// Errors caused by sending a message
+#[derive(Debug)]
+pub enum SendError {
+    Connection(Error),
+    User(UserError),
+}
+
+/// Errors caused by users of the library
+#[derive(Debug)]
+pub enum UserError {
+    /// The stream ID is no longer accepting frames.
+    InactiveStreamId,
+
+    /// The stream is not currently expecting a frame of this type.
+    UnexpectedFrameType,
+
+    /// The payload size is too big
+    PayloadTooBig,
+
+    /// The application attempted to initiate too many streams to the remote.
+    Rejected,
+
+    /// The released capacity is larger than claimed capacity.
+    ReleaseCapacityTooBig,
+
+    /// The stream ID space is overflowed.
+    ///
+    /// A new connection is needed.
+    OverflowedStreamId,
+
+    /// Illegal headers, such as connection-specific headers.
+    MalformedHeaders,
+
+    /// Request submitted with relative URI.
+    MissingUriSchemeAndAuthority,
+
+    /// Calls `SendResponse::poll_reset` after having called `send_response`.
+    PollResetAfterSendResponse,
+
+    /// Calls `PingPong::send_ping` before receiving a pong.
+    SendPingWhilePending,
+
+    /// Tries to update local SETTINGS while ACK has not been received.
+    SendSettingsWhilePending,
+
+    /// Tries to send a push promise to a peer who has disabled server push.
+    PeerDisabledServerPush,
+}
+
+// ===== impl SendError =====
+
+impl error::Error for SendError {}
+
+impl fmt::Display for SendError {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Self::Connection(ref e) => e.fmt(fmt),
+            Self::User(ref e) => e.fmt(fmt),
+        }
+    }
+}
+
+impl From<io::Error> for SendError {
+    fn from(src: io::Error) -> Self {
+        Self::Connection(src.into())
+    }
+}
+
+impl From<UserError> for SendError {
+    fn from(src: UserError) -> Self {
+        SendError::User(src)
+    }
+}
+
+// ===== impl UserError =====
+
+impl error::Error for UserError {}
+
+impl fmt::Display for UserError {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        use self::UserError::*;
+
+        fmt.write_str(match *self {
+            InactiveStreamId => "inactive stream",
+            UnexpectedFrameType => "unexpected frame type",
+            PayloadTooBig => "payload too big",
+            Rejected => "rejected",
+            ReleaseCapacityTooBig => "release capacity too big",
+            OverflowedStreamId => "stream ID overflowed",
+            MalformedHeaders => "malformed headers",
+            MissingUriSchemeAndAuthority => "request URI missing scheme and authority",
+            PollResetAfterSendResponse => "poll_reset after send_response is illegal",
+            SendPingWhilePending => "send_ping before received previous pong",
+            SendSettingsWhilePending => "sending SETTINGS before received previous ACK",
+            PeerDisabledServerPush => "sending PUSH_PROMISE to peer who disabled server push",
+        })
+    }
+}
diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs
new file mode 100644
index 0000000..9270a86
--- /dev/null
+++ b/src/codec/framed_read.rs
@@ -0,0 +1,466 @@
+use crate::frame::{self, Frame, Kind, Reason};
+use crate::frame::{
+    DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE,
+};
+use crate::proto::Error;
+
+use crate::hpack;
+
+use futures_core::Stream;
+
+use bytes::BytesMut;
+
+use std::io;
+
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use tokio::io::AsyncRead;
+use tokio_util::codec::FramedRead as InnerFramedRead;
+use tokio_util::codec::{LengthDelimitedCodec, LengthDelimitedCodecError};
+
+// 16 MB "sane default" taken from golang http2
+const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: usize = 16 << 20;
+
+#[derive(Debug)]
+pub struct FramedRead<T> {
+    inner: InnerFramedRead<T, LengthDelimitedCodec>,
+
+    // hpack decoder state
+    hpack: hpack::Decoder,
+
+    max_header_list_size: usize,
+
+    max_continuation_frames: usize,
+
+    partial: Option<Partial>,
+}
+
+/// Partially loaded headers frame
+#[derive(Debug)]
+struct Partial {
+    /// Empty frame
+    frame: Continuable,
+
+    /// Partial header payload
+    buf: BytesMut,
+
+    continuation_frames_count: usize,
+}
+
+#[derive(Debug)]
+enum Continuable {
+    Headers(frame::Headers),
+    PushPromise(frame::PushPromise),
+}
+
+impl<T> FramedRead<T> {
+    pub fn new(inner: InnerFramedRead<T, LengthDelimitedCodec>) -> FramedRead<T> {
+        let max_header_list_size = DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE;
+        let max_continuation_frames =
+            calc_max_continuation_frames(max_header_list_size, inner.decoder().max_frame_length());
+        FramedRead {
+            inner,
+            hpack: hpack::Decoder::new(DEFAULT_SETTINGS_HEADER_TABLE_SIZE),
+            max_header_list_size,
+            max_continuation_frames,
+            partial: None,
+        }
+    }
+
+    pub fn get_ref(&self) -> &T {
+        self.inner.get_ref()
+    }
+
+    pub fn get_mut(&mut self) -> &mut T {
+        self.inner.get_mut()
+    }
+
+    /// Returns the current max frame size setting
+    #[inline]
+    pub fn max_frame_size(&self) -> usize {
+        self.inner.decoder().max_frame_length()
+    }
+
+    /// Updates the max frame size setting.
+    ///
+    /// Must be within 16,384 and 16,777,215.
+    #[inline]
+    pub fn set_max_frame_size(&mut self, val: usize) {
+        assert!(DEFAULT_MAX_FRAME_SIZE as usize <= val && val <= MAX_MAX_FRAME_SIZE as usize);
+        self.inner.decoder_mut().set_max_frame_length(val);
+        // Update max CONTINUATION frames too, since it's based on this
+        self.max_continuation_frames = calc_max_continuation_frames(self.max_header_list_size, val);
+    }
+
+    /// Update the max header list size setting.
+    #[inline]
+    pub fn set_max_header_list_size(&mut self, val: usize) {
+        self.max_header_list_size = val;
+        // Update max CONTINUATION frames too, since it's based on this
+        self.max_continuation_frames = calc_max_continuation_frames(val, self.max_frame_size());
+    }
+
+    /// Update the header table size setting.
+    #[inline]
+    pub fn set_header_table_size(&mut self, val: usize) {
+        self.hpack.queue_size_update(val);
+    }
+}
+
+fn calc_max_continuation_frames(header_max: usize, frame_max: usize) -> usize {
+    // At least this many frames needed to use max header list size
+    let min_frames_for_list = (header_max / frame_max).max(1);
+    // Some padding for imperfectly packed frames
+    // 25% without floats
+    let padding = min_frames_for_list >> 2;
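+    // Rough illustration with the defaults: a 16 MiB header list limit and
+    // ~16 KiB frames give about 1024 frames plus ~256 of padding, so roughly
+    // 1280 CONTINUATION frames are tolerated before the connection is closed.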
+    min_frames_for_list.saturating_add(padding).max(5)
+}
+
+/// Decodes a frame.
+///
+/// This method is intentionally de-generified and outlined because it is very large.
+fn decode_frame(
+    hpack: &mut hpack::Decoder,
+    max_header_list_size: usize,
+    max_continuation_frames: usize,
+    partial_inout: &mut Option<Partial>,
+    mut bytes: BytesMut,
+) -> Result<Option<Frame>, Error> {
+    let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len());
+    let _e = span.enter();
+
+    tracing::trace!("decoding frame from {}B", bytes.len());
+
+    // Parse the head
+    let head = frame::Head::parse(&bytes);
+
+    if partial_inout.is_some() && head.kind() != Kind::Continuation {
+        proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind());
+        return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+    }
+
+    let kind = head.kind();
+
+    tracing::trace!(frame.kind = ?kind);
+
+    macro_rules! header_block {
+        ($frame:ident, $head:ident, $bytes:ident) => ({
+            // Drop the frame header
+            // TODO: Change to drain: carllerche/bytes#130
+            let _ = $bytes.split_to(frame::HEADER_LEN);
+
+            // Parse the header frame w/o parsing the payload
+            let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) {
+                Ok(res) => res,
+                Err(frame::Error::InvalidDependencyId) => {
+                    proto_err!(stream: "invalid HEADERS dependency ID");
+                    // A stream cannot depend on itself. An endpoint MUST
+                    // treat this as a stream error (Section 5.4.2) of type
+                    // `PROTOCOL_ERROR`.
+                    return Err(Error::library_reset($head.stream_id(), Reason::PROTOCOL_ERROR));
+                },
+                Err(e) => {
+                    proto_err!(conn: "failed to load frame; err={:?}", e);
+                    return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+                }
+            };
+
+            let is_end_headers = frame.is_end_headers();
+
+            // Load the HPACK encoded headers
+            match frame.load_hpack(&mut payload, max_header_list_size, hpack) {
+                Ok(_) => {},
+                Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {},
+                Err(frame::Error::MalformedMessage) => {
+                    let id = $head.stream_id();
+                    proto_err!(stream: "malformed header block; stream={:?}", id);
+                    return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
+                },
+                Err(e) => {
+                    proto_err!(conn: "failed HPACK decoding; err={:?}", e);
+                    return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+                }
+            }
+
+            if is_end_headers {
+                frame.into()
+            } else {
+                tracing::trace!("loaded partial header block");
+                // Defer returning the frame
+                *partial_inout = Some(Partial {
+                    frame: Continuable::$frame(frame),
+                    buf: payload,
+                    continuation_frames_count: 0,
+                });
+
+                return Ok(None);
+            }
+        });
+    }
+
+    let frame = match kind {
+        Kind::Settings => {
+            let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]);
+
+            res.map_err(|e| {
+                proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e);
+                Error::library_go_away(Reason::PROTOCOL_ERROR)
+            })?
+            .into()
+        }
+        Kind::Ping => {
+            let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]);
+
+            res.map_err(|e| {
+                proto_err!(conn: "failed to load PING frame; err={:?}", e);
+                Error::library_go_away(Reason::PROTOCOL_ERROR)
+            })?
+            .into()
+        }
+        Kind::WindowUpdate => {
+            let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]);
+
+            res.map_err(|e| {
+                proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e);
+                Error::library_go_away(Reason::PROTOCOL_ERROR)
+            })?
+            .into()
+        }
+        Kind::Data => {
+            let _ = bytes.split_to(frame::HEADER_LEN);
+            let res = frame::Data::load(head, bytes.freeze());
+
+            // TODO: Should this always be connection level? Probably not...
+            res.map_err(|e| {
+                proto_err!(conn: "failed to load DATA frame; err={:?}", e);
+                Error::library_go_away(Reason::PROTOCOL_ERROR)
+            })?
+            .into()
+        }
+        Kind::Headers => header_block!(Headers, head, bytes),
+        Kind::Reset => {
+            let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]);
+            res.map_err(|e| {
+                proto_err!(conn: "failed to load RESET frame; err={:?}", e);
+                Error::library_go_away(Reason::PROTOCOL_ERROR)
+            })?
+            .into()
+        }
+        Kind::GoAway => {
+            let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]);
+            res.map_err(|e| {
+                proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e);
+                Error::library_go_away(Reason::PROTOCOL_ERROR)
+            })?
+            .into()
+        }
+        Kind::PushPromise => header_block!(PushPromise, head, bytes),
+        Kind::Priority => {
+            if head.stream_id() == 0 {
+                // Invalid stream identifier
+                proto_err!(conn: "invalid stream ID 0");
+                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+            }
+
+            match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) {
+                Ok(frame) => frame.into(),
+                Err(frame::Error::InvalidDependencyId) => {
+                    // A stream cannot depend on itself. An endpoint MUST
+                    // treat this as a stream error (Section 5.4.2) of type
+                    // `PROTOCOL_ERROR`.
+                    let id = head.stream_id();
+                    proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id);
+                    return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
+                }
+                Err(e) => {
+                    proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e);
+                    return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+                }
+            }
+        }
+        Kind::Continuation => {
+            let is_end_headers = (head.flag() & 0x4) == 0x4;
+
+            let mut partial = match partial_inout.take() {
+                Some(partial) => partial,
+                None => {
+                    proto_err!(conn: "received unexpected CONTINUATION frame");
+                    return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+                }
+            };
+
+            // The stream identifiers must match
+            if partial.frame.stream_id() != head.stream_id() {
+                proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID");
+                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+            }
+
+            // Check for CONTINUATION flood
+            if is_end_headers {
+                partial.continuation_frames_count = 0;
+            } else {
+                let cnt = partial.continuation_frames_count + 1;
+                if cnt > max_continuation_frames {
+                    tracing::debug!("too_many_continuations, max = {}", max_continuation_frames);
+                    return Err(Error::library_go_away_data(
+                        Reason::ENHANCE_YOUR_CALM,
+                        "too_many_continuations",
+                    ));
+                } else {
+                    partial.continuation_frames_count = cnt;
+                }
+            }
+
+            // Extend the buf
+            if partial.buf.is_empty() {
+                partial.buf = bytes.split_off(frame::HEADER_LEN);
+            } else {
+                if partial.frame.is_over_size() {
+                    // If there was left over bytes previously, they may be
+                    // needed to continue decoding, even though we will
+                    // be ignoring this frame. This is done to keep the HPACK
+                    // decoder state up-to-date.
+                    //
+                    // Still, we need to be careful, because if a malicious
+                    // attacker were to try to send a gigantic string, such
+                    // that it fits over multiple header blocks, we could
+                    // grow memory uncontrollably again, and that'd be a shame.
+                    //
+                    // Instead, we use a simple heuristic to determine if
+                    // we should continue to ignore decoding, or to tell
+                    // the attacker to go away.
+                    if partial.buf.len() + bytes.len() > max_header_list_size {
+                        proto_err!(conn: "CONTINUATION frame header block size over ignorable limit");
+                        return Err(Error::library_go_away(Reason::COMPRESSION_ERROR));
+                    }
+                }
+                partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]);
+            }
+
+            match partial
+                .frame
+                .load_hpack(&mut partial.buf, max_header_list_size, hpack)
+            {
+                Ok(_) => {}
+                Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}
+                Err(frame::Error::MalformedMessage) => {
+                    let id = head.stream_id();
+                    proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id);
+                    return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
+                }
+                Err(e) => {
+                    proto_err!(conn: "failed HPACK decoding; err={:?}", e);
+                    return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+                }
+            }
+
+            if is_end_headers {
+                partial.frame.into()
+            } else {
+                *partial_inout = Some(partial);
+                return Ok(None);
+            }
+        }
+        Kind::Unknown => {
+            // Unknown frames are ignored
+            return Ok(None);
+        }
+    };
+
+    Ok(Some(frame))
+}
+
+impl<T> Stream for FramedRead<T>
+where
+    T: AsyncRead + Unpin,
+{
+    type Item = Result<Frame, Error>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let span = tracing::trace_span!("FramedRead::poll_next");
+        let _e = span.enter();
+        loop {
+            tracing::trace!("poll");
+            let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) {
+                Some(Ok(bytes)) => bytes,
+                Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))),
+                None => return Poll::Ready(None),
+            };
+
+            tracing::trace!(read.bytes = bytes.len());
+            let Self {
+                ref mut hpack,
+                max_header_list_size,
+                ref mut partial,
+                max_continuation_frames,
+                ..
+            } = *self;
+            if let Some(frame) = decode_frame(
+                hpack,
+                max_header_list_size,
+                max_continuation_frames,
+                partial,
+                bytes,
+            )? {
+                tracing::debug!(?frame, "received");
+                return Poll::Ready(Some(Ok(frame)));
+            }
+        }
+    }
+}
+
+fn map_err(err: io::Error) -> Error {
+    if let io::ErrorKind::InvalidData = err.kind() {
+        if let Some(custom) = err.get_ref() {
+            if custom.is::<LengthDelimitedCodecError>() {
+                return Error::library_go_away(Reason::FRAME_SIZE_ERROR);
+            }
+        }
+    }
+    err.into()
+}
+
+// ===== impl Continuable =====
+
+impl Continuable {
+    fn stream_id(&self) -> frame::StreamId {
+        match *self {
+            Continuable::Headers(ref h) => h.stream_id(),
+            Continuable::PushPromise(ref p) => p.stream_id(),
+        }
+    }
+
+    fn is_over_size(&self) -> bool {
+        match *self {
+            Continuable::Headers(ref h) => h.is_over_size(),
+            Continuable::PushPromise(ref p) => p.is_over_size(),
+        }
+    }
+
+    fn load_hpack(
+        &mut self,
+        src: &mut BytesMut,
+        max_header_list_size: usize,
+        decoder: &mut hpack::Decoder,
+    ) -> Result<(), frame::Error> {
+        match *self {
+            Continuable::Headers(ref mut h) => h.load_hpack(src, max_header_list_size, decoder),
+            Continuable::PushPromise(ref mut p) => p.load_hpack(src, max_header_list_size, decoder),
+        }
+    }
+}
+
+impl<T> From<Continuable> for Frame<T> {
+    fn from(cont: Continuable) -> Self {
+        match cont {
+            Continuable::Headers(mut headers) => {
+                headers.set_end_headers();
+                headers.into()
+            }
+            Continuable::PushPromise(mut push) => {
+                push.set_end_headers();
+                push.into()
+            }
+        }
+    }
+}
diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs
new file mode 100644
index 0000000..c88af02
--- /dev/null
+++ b/src/codec/framed_write.rs
@@ -0,0 +1,362 @@
+use crate::codec::UserError;
+use crate::codec::UserError::*;
+use crate::frame::{self, Frame, FrameSize};
+use crate::hpack;
+
+use bytes::{Buf, BufMut, BytesMut};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
+use tokio_util::io::poll_write_buf;
+
+use std::io::{self, Cursor};
+
+// A macro to get around a method needing to borrow &mut self
+macro_rules! limited_write_buf {
+    ($self:expr) => {{
+        let limit = $self.max_frame_size() + frame::HEADER_LEN;
+        $self.buf.get_mut().limit(limit)
+    }};
+}
+
+#[derive(Debug)]
+pub struct FramedWrite<T, B> {
+    /// Upstream `AsyncWrite`
+    inner: T,
+
+    encoder: Encoder<B>,
+}
+
+#[derive(Debug)]
+struct Encoder<B> {
+    /// HPACK encoder
+    hpack: hpack::Encoder,
+
+    /// Write buffer
+    ///
+    /// TODO: Should this be a ring buffer?
+    buf: Cursor<BytesMut>,
+
+    /// Next frame to encode
+    next: Option<Next<B>>,
+
+    /// Last data frame
+    last_data_frame: Option<frame::Data<B>>,
+
+    /// Max frame size; this is specified by the peer
+    max_frame_size: FrameSize,
+
+    /// Chain payloads bigger than this.
+    chain_threshold: usize,
+
+    /// Min buffer required to attempt to write a frame
+    min_buffer_capacity: usize,
+}
+
+#[derive(Debug)]
+enum Next<B> {
+    Data(frame::Data<B>),
+    Continuation(frame::Continuation),
+}
+
+/// Initialize the connection with this amount of write buffer.
+///
+/// The minimum MAX_FRAME_SIZE is 16kb, so the buffer should always be able
+/// to hold a HEADERS frame of that size.
+const DEFAULT_BUFFER_CAPACITY: usize = 16 * 1_024;
+
+/// Chain payloads bigger than this when vectored I/O is enabled. The remote
+/// will never advertise a max frame size less than this (well, the spec says
+/// the max frame size can't be less than 16kb, so not even close).
+const CHAIN_THRESHOLD: usize = 256;
+
+/// Chain payloads bigger than this when vectored I/O is **not** enabled.
+/// A larger value in this scenario will reduce the number of small,
+/// fragmented data frames being sent, and thereby improve throughput.
+const CHAIN_THRESHOLD_WITHOUT_VECTORED_IO: usize = 1024;
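+
+// Worked example (editorial, not part of upstream h2) of how these thresholds
+// play out in `Encoder::buffer` below: with vectored I/O, a 300-byte DATA
+// payload exceeds CHAIN_THRESHOLD (256), so only the 9-byte frame head is
+// written into `buf` and the payload itself is chained and written via
+// writev. Without vectored I/O, the same payload is below
+// CHAIN_THRESHOLD_WITHOUT_VECTORED_IO (1024) and is copied into `buf`, so the
+// head and payload go out in a single contiguous write.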
+
+// TODO: Make generic
+impl<T, B> FramedWrite<T, B>
+where
+    T: AsyncWrite + Unpin,
+    B: Buf,
+{
+    pub fn new(inner: T) -> FramedWrite<T, B> {
+        let chain_threshold = if inner.is_write_vectored() {
+            CHAIN_THRESHOLD
+        } else {
+            CHAIN_THRESHOLD_WITHOUT_VECTORED_IO
+        };
+        FramedWrite {
+            inner,
+            encoder: Encoder {
+                hpack: hpack::Encoder::default(),
+                buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)),
+                next: None,
+                last_data_frame: None,
+                max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE,
+                chain_threshold,
+                min_buffer_capacity: chain_threshold + frame::HEADER_LEN,
+            },
+        }
+    }
+
+    /// Returns `Ready` when `buffer` is able to accept a frame
+    ///
+    /// Calling this function may result in the current contents of the buffer
+    /// being flushed to `T`.
+    pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
+        if !self.encoder.has_capacity() {
+            // Try flushing
+            ready!(self.flush(cx))?;
+
+            if !self.encoder.has_capacity() {
+                return Poll::Pending;
+            }
+        }
+
+        Poll::Ready(Ok(()))
+    }
+
+    /// Buffer a frame.
+    ///
+    /// `poll_ready` must be called first to ensure that a frame may be
+    /// accepted.
+    pub fn buffer(&mut self, item: Frame<B>) -> Result<(), UserError> {
+        self.encoder.buffer(item)
+    }
+
+    /// Flush buffered data to the wire
+    pub fn flush(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
+        let span = tracing::trace_span!("FramedWrite::flush");
+        let _e = span.enter();
+
+        loop {
+            while !self.encoder.is_empty() {
+                match self.encoder.next {
+                    Some(Next::Data(ref mut frame)) => {
+                        tracing::trace!(queued_data_frame = true);
+                        let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut());
+                        ready!(poll_write_buf(Pin::new(&mut self.inner), cx, &mut buf))?
+                    }
+                    _ => {
+                        tracing::trace!(queued_data_frame = false);
+                        ready!(poll_write_buf(
+                            Pin::new(&mut self.inner),
+                            cx,
+                            &mut self.encoder.buf
+                        ))?
+                    }
+                };
+            }
+
+            match self.encoder.unset_frame() {
+                ControlFlow::Continue => (),
+                ControlFlow::Break => break,
+            }
+        }
+
+        tracing::trace!("flushing buffer");
+        // Flush the upstream
+        ready!(Pin::new(&mut self.inner).poll_flush(cx))?;
+
+        Poll::Ready(Ok(()))
+    }
+
+    /// Close the codec
+    pub fn shutdown(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
+        ready!(self.flush(cx))?;
+        Pin::new(&mut self.inner).poll_shutdown(cx)
+    }
+}
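+
+// Illustrative sketch (not part of upstream h2) of the assumed calling
+// pattern for the write half above: wait for `poll_ready`, stage a frame with
+// `buffer`, then drive `flush` until the bytes reach the socket. The
+// `framed_write` and `frame` bindings are placeholders.
+//
+//     // inside some `poll_*` method, with `cx: &mut Context<'_>`
+//     ready!(framed_write.poll_ready(cx))?;   // ensure buffer capacity
+//     framed_write.buffer(frame)?;            // stage the frame (UserError on misuse)
+//     ready!(framed_write.flush(cx))?;        // write buffered bytes to the I/O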
+
+#[must_use]
+enum ControlFlow {
+    Continue,
+    Break,
+}
+
+impl<B> Encoder<B>
+where
+    B: Buf,
+{
+    fn unset_frame(&mut self) -> ControlFlow {
+        // Clear internal buffer
+        self.buf.set_position(0);
+        self.buf.get_mut().clear();
+
+        // The data frame has been written, so unset it
+        match self.next.take() {
+            Some(Next::Data(frame)) => {
+                self.last_data_frame = Some(frame);
+                debug_assert!(self.is_empty());
+                ControlFlow::Break
+            }
+            Some(Next::Continuation(frame)) => {
+                // Buffer the continuation frame, then try to write again
+                let mut buf = limited_write_buf!(self);
+                if let Some(continuation) = frame.encode(&mut buf) {
+                    self.next = Some(Next::Continuation(continuation));
+                }
+                ControlFlow::Continue
+            }
+            None => ControlFlow::Break,
+        }
+    }
+
+    fn buffer(&mut self, item: Frame<B>) -> Result<(), UserError> {
+        // Ensure that we have enough capacity to accept the write.
+        assert!(self.has_capacity());
+        let span = tracing::trace_span!("FramedWrite::buffer", frame = ?item);
+        let _e = span.enter();
+
+        tracing::debug!(frame = ?item, "send");
+
+        match item {
+            Frame::Data(mut v) => {
+                // Ensure that the payload is not greater than the max frame size.
+                let len = v.payload().remaining();
+
+                if len > self.max_frame_size() {
+                    return Err(PayloadTooBig);
+                }
+
+                if len >= self.chain_threshold {
+                    let head = v.head();
+
+                    // Encode the frame head to the buffer
+                    head.encode(len, self.buf.get_mut());
+
+                    if self.buf.get_ref().remaining() < self.chain_threshold {
+                        let extra_bytes = self.chain_threshold - self.buf.remaining();
+                        self.buf.get_mut().put(v.payload_mut().take(extra_bytes));
+                    }
+
+                    // Save the data frame
+                    self.next = Some(Next::Data(v));
+                } else {
+                    v.encode_chunk(self.buf.get_mut());
+
+                    // The chunk has been fully encoded, so there is no need to
+                    // keep it around
+                    assert_eq!(v.payload().remaining(), 0, "chunk not fully encoded");
+
+                    // Save off the last frame...
+                    self.last_data_frame = Some(v);
+                }
+            }
+            Frame::Headers(v) => {
+                let mut buf = limited_write_buf!(self);
+                if let Some(continuation) = v.encode(&mut self.hpack, &mut buf) {
+                    self.next = Some(Next::Continuation(continuation));
+                }
+            }
+            Frame::PushPromise(v) => {
+                let mut buf = limited_write_buf!(self);
+                if let Some(continuation) = v.encode(&mut self.hpack, &mut buf) {
+                    self.next = Some(Next::Continuation(continuation));
+                }
+            }
+            Frame::Settings(v) => {
+                v.encode(self.buf.get_mut());
+                tracing::trace!(rem = self.buf.remaining(), "encoded settings");
+            }
+            Frame::GoAway(v) => {
+                v.encode(self.buf.get_mut());
+                tracing::trace!(rem = self.buf.remaining(), "encoded go_away");
+            }
+            Frame::Ping(v) => {
+                v.encode(self.buf.get_mut());
+                tracing::trace!(rem = self.buf.remaining(), "encoded ping");
+            }
+            Frame::WindowUpdate(v) => {
+                v.encode(self.buf.get_mut());
+                tracing::trace!(rem = self.buf.remaining(), "encoded window_update");
+            }
+
+            Frame::Priority(_) => {
+                /*
+                v.encode(self.buf.get_mut());
+                tracing::trace!("encoded priority; rem={:?}", self.buf.remaining());
+                */
+                unimplemented!();
+            }
+            Frame::Reset(v) => {
+                v.encode(self.buf.get_mut());
+                tracing::trace!(rem = self.buf.remaining(), "encoded reset");
+            }
+        }
+
+        Ok(())
+    }
+
+    fn has_capacity(&self) -> bool {
+        self.next.is_none()
+            && (self.buf.get_ref().capacity() - self.buf.get_ref().len()
+                >= self.min_buffer_capacity)
+    }
+
+    fn is_empty(&self) -> bool {
+        match self.next {
+            Some(Next::Data(ref frame)) => !frame.payload().has_remaining(),
+            _ => !self.buf.has_remaining(),
+        }
+    }
+}
+
+impl<B> Encoder<B> {
+    fn max_frame_size(&self) -> usize {
+        self.max_frame_size as usize
+    }
+}
+
+impl<T, B> FramedWrite<T, B> {
+    /// Returns the max frame size that can be sent
+    pub fn max_frame_size(&self) -> usize {
+        self.encoder.max_frame_size()
+    }
+
+    /// Set the peer's max frame size.
+    pub fn set_max_frame_size(&mut self, val: usize) {
+        assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize);
+        self.encoder.max_frame_size = val as FrameSize;
+    }
+
+    /// Set the peer's header table size.
+    pub fn set_header_table_size(&mut self, val: usize) {
+        self.encoder.hpack.update_max_size(val);
+    }
+
+    /// Retrieve the last data frame that has been sent
+    pub fn take_last_data_frame(&mut self) -> Option<frame::Data<B>> {
+        self.encoder.last_data_frame.take()
+    }
+
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.inner
+    }
+}
+
+impl<T: AsyncRead + Unpin, B> AsyncRead for FramedWrite<T, B> {
+    fn poll_read(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut ReadBuf,
+    ) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.inner).poll_read(cx, buf)
+    }
+}
+
+// We never project the Pin to `B`.
+impl<T: Unpin, B> Unpin for FramedWrite<T, B> {}
+
+#[cfg(feature = "unstable")]
+mod unstable {
+    use super::*;
+
+    impl<T, B> FramedWrite<T, B> {
+        pub fn get_ref(&self) -> &T {
+            &self.inner
+        }
+    }
+}
diff --git a/src/codec/mod.rs b/src/codec/mod.rs
new file mode 100644
index 0000000..6cbdc1e
--- /dev/null
+++ b/src/codec/mod.rs
@@ -0,0 +1,206 @@
+mod error;
+mod framed_read;
+mod framed_write;
+
+pub use self::error::{SendError, UserError};
+
+use self::framed_read::FramedRead;
+use self::framed_write::FramedWrite;
+
+use crate::frame::{self, Data, Frame};
+use crate::proto::Error;
+
+use bytes::Buf;
+use futures_core::Stream;
+use futures_sink::Sink;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio_util::codec::length_delimited;
+
+use std::io;
+
+#[derive(Debug)]
+pub struct Codec<T, B> {
+    inner: FramedRead<FramedWrite<T, B>>,
+}
+
+impl<T, B> Codec<T, B>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+    B: Buf,
+{
+    /// Returns a new `Codec` with the default max frame size
+    #[inline]
+    pub fn new(io: T) -> Self {
+        Self::with_max_recv_frame_size(io, frame::DEFAULT_MAX_FRAME_SIZE as usize)
+    }
+
+    /// Returns a new `Codec` with the given maximum frame size
+    pub fn with_max_recv_frame_size(io: T, max_frame_size: usize) -> Self {
+        // Wrap with writer
+        let framed_write = FramedWrite::new(io);
+
+        // Delimit the frames
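+        //
+        // Clarifying note (editorial, not part of upstream h2): an HTTP/2
+        // frame starts with a 9-octet header -- a 24-bit payload length, an
+        // 8-bit type, 8-bit flags, and a 31-bit stream identifier. The length
+        // field counts only the payload, hence `length_field_length(3)`,
+        // `length_adjustment(9)` to include the 9 header octets in each
+        // decoded chunk, and `num_skip(0)` so the header is kept for
+        // `FramedRead` to parse.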
+        let delimited = length_delimited::Builder::new()
+            .big_endian()
+            .length_field_length(3)
+            .length_adjustment(9)
+            .num_skip(0) // Don't skip the header
+            .new_read(framed_write);
+
+        let mut inner = FramedRead::new(delimited);
+
+        // Use FramedRead's method since it checks the value is within range.
+        inner.set_max_frame_size(max_frame_size);
+
+        Codec { inner }
+    }
+}
+
+impl<T, B> Codec<T, B> {
+    /// Updates the max received frame size.
+    ///
+    /// The change takes effect the next time a frame is decoded. In other
+    /// words, if a frame is currently in the process of being decoded with a
+    /// frame size greater than `val` but less than the max frame size in
+    /// effect before calling this function, then the frame will be allowed.
+    #[inline]
+    pub fn set_max_recv_frame_size(&mut self, val: usize) {
+        self.inner.set_max_frame_size(val)
+    }
+
+    /// Returns the current max received frame size setting.
+    ///
+    /// This is the largest size this codec will accept from the wire. Larger
+    /// frames will be rejected.
+    #[cfg(feature = "unstable")]
+    #[inline]
+    pub fn max_recv_frame_size(&self) -> usize {
+        self.inner.max_frame_size()
+    }
+
+    /// Returns the max frame size that can be sent to the peer.
+    pub fn max_send_frame_size(&self) -> usize {
+        self.inner.get_ref().max_frame_size()
+    }
+
+    /// Set the peer's max frame size.
+    pub fn set_max_send_frame_size(&mut self, val: usize) {
+        self.framed_write().set_max_frame_size(val)
+    }
+
+    /// Set the peer's header table size.
+    pub fn set_send_header_table_size(&mut self, val: usize) {
+        self.framed_write().set_header_table_size(val)
+    }
+
+    /// Set the decoder's header table size.
+    pub fn set_recv_header_table_size(&mut self, val: usize) {
+        self.inner.set_header_table_size(val)
+    }
+
+    /// Set the max header list size that can be received.
+    pub fn set_max_recv_header_list_size(&mut self, val: usize) {
+        self.inner.set_max_header_list_size(val);
+    }
+
+    /// Get a reference to the inner stream.
+    #[cfg(feature = "unstable")]
+    pub fn get_ref(&self) -> &T {
+        self.inner.get_ref().get_ref()
+    }
+
+    /// Get a mutable reference to the inner stream.
+    pub fn get_mut(&mut self) -> &mut T {
+        self.inner.get_mut().get_mut()
+    }
+
+    /// Takes the data payload value that was fully written to the socket
+    pub(crate) fn take_last_data_frame(&mut self) -> Option<Data<B>> {
+        self.framed_write().take_last_data_frame()
+    }
+
+    fn framed_write(&mut self) -> &mut FramedWrite<T, B> {
+        self.inner.get_mut()
+    }
+}
+
+impl<T, B> Codec<T, B>
+where
+    T: AsyncWrite + Unpin,
+    B: Buf,
+{
+    /// Returns `Ready` when the codec can buffer a frame
+    pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
+        self.framed_write().poll_ready(cx)
+    }
+
+    /// Buffer a frame.
+    ///
+    /// `poll_ready` must be called first to ensure that a frame may be
+    /// accepted.
+    ///
+    /// TODO: Rename this to avoid conflicts with Sink::buffer
+    pub fn buffer(&mut self, item: Frame<B>) -> Result<(), UserError> {
+        self.framed_write().buffer(item)
+    }
+
+    /// Flush buffered data to the wire
+    pub fn flush(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
+        self.framed_write().flush(cx)
+    }
+
+    /// Shutdown the send half
+    pub fn shutdown(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
+        self.framed_write().shutdown(cx)
+    }
+}
+
+impl<T, B> Stream for Codec<T, B>
+where
+    T: AsyncRead + Unpin,
+{
+    type Item = Result<Frame, Error>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        Pin::new(&mut self.inner).poll_next(cx)
+    }
+}
+
+impl<T, B> Sink<Frame<B>> for Codec<T, B>
+where
+    T: AsyncWrite + Unpin,
+    B: Buf,
+{
+    type Error = SendError;
+
+    fn start_send(mut self: Pin<&mut Self>, item: Frame<B>) -> Result<(), Self::Error> {
+        Codec::buffer(&mut self, item)?;
+        Ok(())
+    }
+
+    /// Returns `Ready` when the codec can buffer a frame
+    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        self.framed_write().poll_ready(cx).map_err(Into::into)
+    }
+
+    /// Flush buffered data to the wire
+    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        self.framed_write().flush(cx).map_err(Into::into)
+    }
+
+    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        ready!(self.shutdown(cx))?;
+        Poll::Ready(Ok(()))
+    }
+}
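+
+// Illustrative sketch (not part of upstream h2) of driving the codec. `Codec`
+// implements both `Stream` (decoded frames in) and `Sink` (frames out), so it
+// can be exercised roughly as below, assuming `SinkExt`/`StreamExt` from the
+// `futures` crate are in scope, `io` is any `AsyncRead + AsyncWrite + Unpin`
+// transport, and `frame` is some `Frame<Bytes>` value:
+//
+//     let mut codec = Codec::from(io);
+//     codec.send(frame).await?;                    // SinkExt::send
+//     while let Some(result) = codec.next().await { // StreamExt::next
+//         let frame = result?;                     // Frame or connection error
+//         /* handle the frame */
+//     }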
+
+// TODO: remove (or improve) this
+impl<T> From<T> for Codec<T, bytes::Bytes>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+{
+    fn from(src: T) -> Self {
+        Self::new(src)
+    }
+}
diff --git a/src/error.rs b/src/error.rs
new file mode 100644
index 0000000..96a471b
--- /dev/null
+++ b/src/error.rs
@@ -0,0 +1,211 @@
+use crate::codec::{SendError, UserError};
+use crate::frame::StreamId;
+use crate::proto::{self, Initiator};
+
+use bytes::Bytes;
+use std::{error, fmt, io};
+
+pub use crate::frame::Reason;
+
+/// Represents HTTP/2 operation errors.
+///
+/// `Error` covers error cases raised by protocol errors caused by the
+/// peer, I/O (transport) errors, and errors caused by the user of the library.
+///
+/// If the error was caused by the remote peer, then it will contain a
+/// [`Reason`] which can be obtained with the [`reason`] function.
+///
+/// [`Reason`]: struct.Reason.html
+/// [`reason`]: #method.reason
+#[derive(Debug)]
+pub struct Error {
+    kind: Kind,
+}
+
+#[derive(Debug)]
+enum Kind {
+    /// A RST_STREAM frame was received or sent.
+    #[allow(dead_code)]
+    Reset(StreamId, Reason, Initiator),
+
+    /// A GO_AWAY frame was received or sent.
+    GoAway(Bytes, Reason, Initiator),
+
+    /// The user created an error from a bare Reason.
+    Reason(Reason),
+
+    /// An error resulting from an invalid action taken by the user of this
+    /// library.
+    User(UserError),
+
+    /// An `io::Error` occurred while trying to read or write.
+    Io(io::Error),
+}
+
+// ===== impl Error =====
+
+impl Error {
+    /// If the error was caused by the remote peer, the error reason.
+    ///
+    /// This is either an error received from the peer or one caused by an
+    /// invalid action taken by the peer (i.e. a protocol error).
+    pub fn reason(&self) -> Option<Reason> {
+        match self.kind {
+            Kind::Reset(_, reason, _) | Kind::GoAway(_, reason, _) | Kind::Reason(reason) => {
+                Some(reason)
+            }
+            _ => None,
+        }
+    }
+
+    /// Returns true if the error is an io::Error
+    pub fn is_io(&self) -> bool {
+        matches!(self.kind, Kind::Io(..))
+    }
+
+    /// Returns a reference to the underlying io::Error, if this is an io::Error
+    pub fn get_io(&self) -> Option<&io::Error> {
+        match self.kind {
+            Kind::Io(ref e) => Some(e),
+            _ => None,
+        }
+    }
+
+    /// Consumes the error, returning the underlying io::Error if it is one
+    pub fn into_io(self) -> Option<io::Error> {
+        match self.kind {
+            Kind::Io(e) => Some(e),
+            _ => None,
+        }
+    }
+
+    pub(crate) fn from_io(err: io::Error) -> Self {
+        Error {
+            kind: Kind::Io(err),
+        }
+    }
+
+    /// Returns true if the error is from a `GOAWAY`.
+    pub fn is_go_away(&self) -> bool {
+        matches!(self.kind, Kind::GoAway(..))
+    }
+
+    /// Returns true if the error is from a `RST_STREAM`.
+    pub fn is_reset(&self) -> bool {
+        matches!(self.kind, Kind::Reset(..))
+    }
+
+    /// Returns true if the error was received in a frame from the remote.
+    ///
+    /// Such as from a received `RST_STREAM` or `GOAWAY` frame.
+    pub fn is_remote(&self) -> bool {
+        matches!(
+            self.kind,
+            Kind::GoAway(_, _, Initiator::Remote) | Kind::Reset(_, _, Initiator::Remote)
+        )
+    }
+
+    /// Returns true if the error was created by `h2`.
+    ///
+    /// Such as noticing some protocol error and sending a GOAWAY or RST_STREAM.
+    pub fn is_library(&self) -> bool {
+        matches!(
+            self.kind,
+            Kind::GoAway(_, _, Initiator::Library) | Kind::Reset(_, _, Initiator::Library)
+        )
+    }
+}
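+
+// Illustrative sketch (not part of upstream h2) of how a caller might triage
+// these errors; `err` is a placeholder for an `Error` returned from a request
+// or connection future:
+//
+//     if let Some(reason) = err.reason() {
+//         // Protocol-level failure (RST_STREAM, GOAWAY, or a bare Reason).
+//         eprintln!("h2 protocol error: {}", reason);
+//     } else if err.is_io() {
+//         // Transport failure; the underlying io::Error is available.
+//         eprintln!("transport error: {}", err.get_io().unwrap());
+//     }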
+
+impl From<proto::Error> for Error {
+    fn from(src: proto::Error) -> Error {
+        use crate::proto::Error::*;
+
+        Error {
+            kind: match src {
+                Reset(stream_id, reason, initiator) => Kind::Reset(stream_id, reason, initiator),
+                GoAway(debug_data, reason, initiator) => {
+                    Kind::GoAway(debug_data, reason, initiator)
+                }
+                Io(kind, inner) => {
+                    Kind::Io(inner.map_or_else(|| kind.into(), |inner| io::Error::new(kind, inner)))
+                }
+            },
+        }
+    }
+}
+
+impl From<Reason> for Error {
+    fn from(src: Reason) -> Error {
+        Error {
+            kind: Kind::Reason(src),
+        }
+    }
+}
+
+impl From<SendError> for Error {
+    fn from(src: SendError) -> Error {
+        match src {
+            SendError::User(e) => e.into(),
+            SendError::Connection(e) => e.into(),
+        }
+    }
+}
+
+impl From<UserError> for Error {
+    fn from(src: UserError) -> Error {
+        Error {
+            kind: Kind::User(src),
+        }
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        let debug_data = match self.kind {
+            Kind::Reset(_, reason, Initiator::User) => {
+                return write!(fmt, "stream error sent by user: {}", reason)
+            }
+            Kind::Reset(_, reason, Initiator::Library) => {
+                return write!(fmt, "stream error detected: {}", reason)
+            }
+            Kind::Reset(_, reason, Initiator::Remote) => {
+                return write!(fmt, "stream error received: {}", reason)
+            }
+            Kind::GoAway(ref debug_data, reason, Initiator::User) => {
+                write!(fmt, "connection error sent by user: {}", reason)?;
+                debug_data
+            }
+            Kind::GoAway(ref debug_data, reason, Initiator::Library) => {
+                write!(fmt, "connection error detected: {}", reason)?;
+                debug_data
+            }
+            Kind::GoAway(ref debug_data, reason, Initiator::Remote) => {
+                write!(fmt, "connection error received: {}", reason)?;
+                debug_data
+            }
+            Kind::Reason(reason) => return write!(fmt, "protocol error: {}", reason),
+            Kind::User(ref e) => return write!(fmt, "user error: {}", e),
+            Kind::Io(ref e) => return e.fmt(fmt),
+        };
+
+        if !debug_data.is_empty() {
+            write!(fmt, " ({:?})", debug_data)?;
+        }
+
+        Ok(())
+    }
+}
+
+impl error::Error for Error {}
+
+#[cfg(test)]
+mod tests {
+    use super::Error;
+    use crate::Reason;
+
+    #[test]
+    fn error_from_reason() {
+        let err = Error::from(Reason::HTTP_1_1_REQUIRED);
+        assert_eq!(err.reason(), Some(Reason::HTTP_1_1_REQUIRED));
+    }
+}
diff --git a/src/ext.rs b/src/ext.rs
new file mode 100644
index 0000000..cf383a4
--- /dev/null
+++ b/src/ext.rs
@@ -0,0 +1,55 @@
+//! Extensions specific to the HTTP/2 protocol.
+
+use crate::hpack::BytesStr;
+
+use bytes::Bytes;
+use std::fmt;
+
+/// Represents the `:protocol` pseudo-header used by
+/// the [Extended CONNECT Protocol].
+///
+/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
+#[derive(Clone, Eq, PartialEq)]
+pub struct Protocol {
+    value: BytesStr,
+}
+
+impl Protocol {
+    /// Converts a static string to a protocol name.
+    pub const fn from_static(value: &'static str) -> Self {
+        Self {
+            value: BytesStr::from_static(value),
+        }
+    }
+
+    /// Returns a str representation of the header.
+    pub fn as_str(&self) -> &str {
+        self.value.as_str()
+    }
+
+    pub(crate) fn try_from(bytes: Bytes) -> Result<Self, std::str::Utf8Error> {
+        Ok(Self {
+            value: BytesStr::try_from(bytes)?,
+        })
+    }
+}
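+
+// Illustrative sketch (not part of upstream h2) of a `:protocol` value as
+// used by Extended CONNECT, e.g. RFC 8441 WebSockets:
+//
+//     let proto = Protocol::from_static("websocket");
+//     assert_eq!(proto.as_str(), "websocket");
+//     assert_eq!(proto, Protocol::from("websocket"));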
+
+impl<'a> From<&'a str> for Protocol {
+    fn from(value: &'a str) -> Self {
+        Self {
+            value: BytesStr::from(value),
+        }
+    }
+}
+
+impl AsRef<[u8]> for Protocol {
+    fn as_ref(&self) -> &[u8] {
+        self.value.as_ref()
+    }
+}
+
+impl fmt::Debug for Protocol {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.value.fmt(f)
+    }
+}
diff --git a/src/frame/data.rs b/src/frame/data.rs
new file mode 100644
index 0000000..5ed3c31
--- /dev/null
+++ b/src/frame/data.rs
@@ -0,0 +1,227 @@
+use crate::frame::{util, Error, Frame, Head, Kind, StreamId};
+use bytes::{Buf, BufMut, Bytes};
+
+use std::fmt;
+
+/// Data frame
+///
+/// Data frames convey arbitrary, variable-length sequences of octets associated
+/// with a stream. One or more DATA frames are used, for instance, to carry HTTP
+/// request or response payloads.
+#[derive(Eq, PartialEq)]
+pub struct Data<T = Bytes> {
+    stream_id: StreamId,
+    data: T,
+    flags: DataFlags,
+    pad_len: Option<u8>,
+}
+
+#[derive(Copy, Clone, Default, Eq, PartialEq)]
+struct DataFlags(u8);
+
+const END_STREAM: u8 = 0x1;
+const PADDED: u8 = 0x8;
+const ALL: u8 = END_STREAM | PADDED;
+
+impl<T> Data<T> {
+    /// Creates a new DATA frame.
+    pub fn new(stream_id: StreamId, payload: T) -> Self {
+        assert!(!stream_id.is_zero());
+
+        Data {
+            stream_id,
+            data: payload,
+            flags: DataFlags::default(),
+            pad_len: None,
+        }
+    }
+
+    /// Returns the stream identifier that this frame is associated with.
+    ///
+    /// This cannot be a zero stream identifier.
+    pub fn stream_id(&self) -> StreamId {
+        self.stream_id
+    }
+
+    /// Gets the value of the `END_STREAM` flag for this frame.
+    ///
+    /// If true, this frame is the last that the endpoint will send for the
+    /// identified stream.
+    ///
+    /// Setting this flag causes the stream to enter one of the "half-closed"
+    /// states or the "closed" state (Section 5.1).
+    pub fn is_end_stream(&self) -> bool {
+        self.flags.is_end_stream()
+    }
+
+    /// Sets the value for the `END_STREAM` flag on this frame.
+    pub fn set_end_stream(&mut self, val: bool) {
+        if val {
+            self.flags.set_end_stream();
+        } else {
+            self.flags.unset_end_stream();
+        }
+    }
+
+    /// Returns whether the `PADDED` flag is set on this frame.
+    #[cfg(feature = "unstable")]
+    pub fn is_padded(&self) -> bool {
+        self.flags.is_padded()
+    }
+
+    /// Sets the value for the `PADDED` flag on this frame.
+    #[cfg(feature = "unstable")]
+    pub fn set_padded(&mut self) {
+        self.flags.set_padded();
+    }
+
+    /// Returns a reference to this frame's payload.
+    ///
+    /// This does **not** include any padding that might have been originally
+    /// included.
+    pub fn payload(&self) -> &T {
+        &self.data
+    }
+
+    /// Returns a mutable reference to this frame's payload.
+    ///
+    /// This does **not** include any padding that might have been originally
+    /// included.
+    pub fn payload_mut(&mut self) -> &mut T {
+        &mut self.data
+    }
+
+    /// Consumes `self` and returns the frame's payload.
+    ///
+    /// This does **not** include any padding that might have been originally
+    /// included.
+    pub fn into_payload(self) -> T {
+        self.data
+    }
+
+    pub(crate) fn head(&self) -> Head {
+        Head::new(Kind::Data, self.flags.into(), self.stream_id)
+    }
+
+    pub(crate) fn map<F, U>(self, f: F) -> Data<U>
+    where
+        F: FnOnce(T) -> U,
+    {
+        Data {
+            stream_id: self.stream_id,
+            data: f(self.data),
+            flags: self.flags,
+            pad_len: self.pad_len,
+        }
+    }
+}
+
+impl Data<Bytes> {
+    pub(crate) fn load(head: Head, mut payload: Bytes) -> Result<Self, Error> {
+        let flags = DataFlags::load(head.flag());
+
+        // The stream identifier must not be zero
+        if head.stream_id().is_zero() {
+            return Err(Error::InvalidStreamId);
+        }
+
+        let pad_len = if flags.is_padded() {
+            let len = util::strip_padding(&mut payload)?;
+            Some(len)
+        } else {
+            None
+        };
+
+        Ok(Data {
+            stream_id: head.stream_id(),
+            data: payload,
+            flags,
+            pad_len,
+        })
+    }
+}
+
+impl<T: Buf> Data<T> {
+    /// Encode the data frame into the `dst` buffer.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `dst` cannot contain the data frame.
+    pub(crate) fn encode_chunk<U: BufMut>(&mut self, dst: &mut U) {
+        let len = self.data.remaining();
+
+        assert!(dst.remaining_mut() >= len);
+
+        self.head().encode(len, dst);
+        dst.put(&mut self.data);
+    }
+}
+
+impl<T> From<Data<T>> for Frame<T> {
+    fn from(src: Data<T>) -> Self {
+        Frame::Data(src)
+    }
+}
+
+impl<T> fmt::Debug for Data<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        let mut f = fmt.debug_struct("Data");
+        f.field("stream_id", &self.stream_id);
+        if !self.flags.is_empty() {
+            f.field("flags", &self.flags);
+        }
+        if let Some(ref pad_len) = self.pad_len {
+            f.field("pad_len", pad_len);
+        }
+        // `data` bytes purposefully excluded
+        f.finish()
+    }
+}
+
+// ===== impl DataFlags =====
+
+impl DataFlags {
+    fn load(bits: u8) -> DataFlags {
+        DataFlags(bits & ALL)
+    }
+
+    fn is_empty(&self) -> bool {
+        self.0 == 0
+    }
+
+    fn is_end_stream(&self) -> bool {
+        self.0 & END_STREAM == END_STREAM
+    }
+
+    fn set_end_stream(&mut self) {
+        self.0 |= END_STREAM
+    }
+
+    fn unset_end_stream(&mut self) {
+        self.0 &= !END_STREAM
+    }
+
+    fn is_padded(&self) -> bool {
+        self.0 & PADDED == PADDED
+    }
+
+    #[cfg(feature = "unstable")]
+    fn set_padded(&mut self) {
+        self.0 |= PADDED
+    }
+}
+
+impl From<DataFlags> for u8 {
+    fn from(src: DataFlags) -> u8 {
+        src.0
+    }
+}
+
+impl fmt::Debug for DataFlags {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        util::debug_flags(fmt, self.0)
+            .flag_if(self.is_end_stream(), "END_STREAM")
+            .flag_if(self.is_padded(), "PADDED")
+            .finish()
+    }
+}
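+
+// Illustrative sketch (not part of upstream h2) of constructing a DATA frame;
+// `stream_id` is a placeholder for a valid, non-zero stream identifier:
+//
+//     let mut data = Data::new(stream_id, Bytes::from_static(b"hello"));
+//     data.set_end_stream(true);        // last frame on this stream
+//     assert!(data.is_end_stream());
+//     assert_eq!(data.payload().len(), 5);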
diff --git a/src/frame/go_away.rs b/src/frame/go_away.rs
new file mode 100644
index 0000000..99330e9
--- /dev/null
+++ b/src/frame/go_away.rs
@@ -0,0 +1,87 @@
+use std::fmt;
+
+use bytes::{BufMut, Bytes};
+
+use crate::frame::{self, Error, Head, Kind, Reason, StreamId};
+
+#[derive(Clone, Eq, PartialEq)]
+pub struct GoAway {
+    last_stream_id: StreamId,
+    error_code: Reason,
+    debug_data: Bytes,
+}
+
+impl GoAway {
+    pub fn new(last_stream_id: StreamId, reason: Reason) -> Self {
+        GoAway {
+            last_stream_id,
+            error_code: reason,
+            debug_data: Bytes::new(),
+        }
+    }
+
+    pub fn with_debug_data(last_stream_id: StreamId, reason: Reason, debug_data: Bytes) -> Self {
+        Self {
+            last_stream_id,
+            error_code: reason,
+            debug_data,
+        }
+    }
+
+    pub fn last_stream_id(&self) -> StreamId {
+        self.last_stream_id
+    }
+
+    pub fn reason(&self) -> Reason {
+        self.error_code
+    }
+
+    pub fn debug_data(&self) -> &Bytes {
+        &self.debug_data
+    }
+
+    pub fn load(payload: &[u8]) -> Result<GoAway, Error> {
+        if payload.len() < 8 {
+            return Err(Error::BadFrameSize);
+        }
+
+        let (last_stream_id, _) = StreamId::parse(&payload[..4]);
+        let error_code = unpack_octets_4!(payload, 4, u32);
+        let debug_data = Bytes::copy_from_slice(&payload[8..]);
+
+        Ok(GoAway {
+            last_stream_id,
+            error_code: error_code.into(),
+            debug_data,
+        })
+    }
+
+    pub fn encode<B: BufMut>(&self, dst: &mut B) {
+        tracing::trace!("encoding GO_AWAY; code={:?}", self.error_code);
+        let head = Head::new(Kind::GoAway, 0, StreamId::zero());
+        head.encode(8 + self.debug_data.len(), dst);
+        dst.put_u32(self.last_stream_id.into());
+        dst.put_u32(self.error_code.into());
+        dst.put(self.debug_data.slice(..));
+    }
+}
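+
+// Clarifying note (editorial, not part of upstream h2): on the wire a GOAWAY
+// payload is 8 fixed octets -- a 31-bit last-stream-id (high bit reserved)
+// followed by a 32-bit error code -- plus optional opaque debug data, which
+// is why `load` requires at least 8 bytes and `encode` writes
+// `8 + debug_data.len()` as the payload length.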
+
+impl<B> From<GoAway> for frame::Frame<B> {
+    fn from(src: GoAway) -> Self {
+        frame::Frame::GoAway(src)
+    }
+}
+
+impl fmt::Debug for GoAway {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut builder = f.debug_struct("GoAway");
+        builder.field("error_code", &self.error_code);
+        builder.field("last_stream_id", &self.last_stream_id);
+
+        if !self.debug_data.is_empty() {
+            builder.field("debug_data", &self.debug_data);
+        }
+
+        builder.finish()
+    }
+}
diff --git a/src/frame/head.rs b/src/frame/head.rs
new file mode 100644
index 0000000..38be2f6
--- /dev/null
+++ b/src/frame/head.rs
@@ -0,0 +1,94 @@
+use super::StreamId;
+
+use bytes::BufMut;
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct Head {
+    kind: Kind,
+    flag: u8,
+    stream_id: StreamId,
+}
+
+#[repr(u8)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum Kind {
+    Data = 0,
+    Headers = 1,
+    Priority = 2,
+    Reset = 3,
+    Settings = 4,
+    PushPromise = 5,
+    Ping = 6,
+    GoAway = 7,
+    WindowUpdate = 8,
+    Continuation = 9,
+    Unknown,
+}
+
+// ===== impl Head =====
+
+impl Head {
+    pub fn new(kind: Kind, flag: u8, stream_id: StreamId) -> Head {
+        Head {
+            kind,
+            flag,
+            stream_id,
+        }
+    }
+
+    /// Parse an HTTP/2 frame header
+    pub fn parse(header: &[u8]) -> Head {
+        let (stream_id, _) = StreamId::parse(&header[5..]);
+
+        Head {
+            kind: Kind::new(header[3]),
+            flag: header[4],
+            stream_id,
+        }
+    }
+
+    pub fn stream_id(&self) -> StreamId {
+        self.stream_id
+    }
+
+    pub fn kind(&self) -> Kind {
+        self.kind
+    }
+
+    pub fn flag(&self) -> u8 {
+        self.flag
+    }
+
+    pub fn encode_len(&self) -> usize {
+        super::HEADER_LEN
+    }
+
+    pub fn encode<T: BufMut>(&self, payload_len: usize, dst: &mut T) {
+        debug_assert!(self.encode_len() <= dst.remaining_mut());
+
+        dst.put_uint(payload_len as u64, 3);
+        dst.put_u8(self.kind as u8);
+        dst.put_u8(self.flag);
+        dst.put_u32(self.stream_id.into());
+    }
+}
+
+// ===== impl Kind =====
+
+impl Kind {
+    pub fn new(byte: u8) -> Kind {
+        match byte {
+            0 => Kind::Data,
+            1 => Kind::Headers,
+            2 => Kind::Priority,
+            3 => Kind::Reset,
+            4 => Kind::Settings,
+            5 => Kind::PushPromise,
+            6 => Kind::Ping,
+            7 => Kind::GoAway,
+            8 => Kind::WindowUpdate,
+            9 => Kind::Continuation,
+            _ => Kind::Unknown,
+        }
+    }
+}
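+
+// Worked example (editorial, not part of upstream h2) of parsing the 9-octet
+// frame header with the types above:
+//
+//     // length = 8, type = 0x6 (PING), flags = 0x1 (ACK), stream id = 0
+//     let raw = [0x00, 0x00, 0x08, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00];
+//     let head = Head::parse(&raw);
+//     assert_eq!(head.kind(), Kind::Ping);
+//     assert_eq!(head.flag(), 0x1);
+//     assert_eq!(u32::from(head.stream_id()), 0);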
diff --git a/src/frame/headers.rs b/src/frame/headers.rs
new file mode 100644
index 0000000..e9b163e
--- /dev/null
+++ b/src/frame/headers.rs
@@ -0,0 +1,1051 @@
+use super::{util, StreamDependency, StreamId};
+use crate::ext::Protocol;
+use crate::frame::{Error, Frame, Head, Kind};
+use crate::hpack::{self, BytesStr};
+
+use http::header::{self, HeaderName, HeaderValue};
+use http::{uri, HeaderMap, Method, Request, StatusCode, Uri};
+
+use bytes::{BufMut, Bytes, BytesMut};
+
+use std::fmt;
+use std::io::Cursor;
+
+type EncodeBuf<'a> = bytes::buf::Limit<&'a mut BytesMut>;
+
+/// Header frame
+///
+/// This could be either a request or a response.
+#[derive(Eq, PartialEq)]
+pub struct Headers {
+    /// The ID of the stream with which this frame is associated.
+    stream_id: StreamId,
+
+    /// The stream dependency information, if any.
+    stream_dep: Option<StreamDependency>,
+
+    /// The header block fragment
+    header_block: HeaderBlock,
+
+    /// The associated flags
+    flags: HeadersFlag,
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct HeadersFlag(u8);
+
+#[derive(Eq, PartialEq)]
+pub struct PushPromise {
+    /// The ID of the stream with which this frame is associated.
+    stream_id: StreamId,
+
+    /// The ID of the stream being reserved by this PushPromise.
+    promised_id: StreamId,
+
+    /// The header block fragment
+    header_block: HeaderBlock,
+
+    /// The associated flags
+    flags: PushPromiseFlag,
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct PushPromiseFlag(u8);
+
+#[derive(Debug)]
+pub struct Continuation {
+    /// Stream ID of continuation frame
+    stream_id: StreamId,
+
+    header_block: EncodingHeaderBlock,
+}
+
+// TODO: These fields shouldn't be `pub`
+#[derive(Debug, Default, Eq, PartialEq)]
+pub struct Pseudo {
+    // Request
+    pub method: Option<Method>,
+    pub scheme: Option<BytesStr>,
+    pub authority: Option<BytesStr>,
+    pub path: Option<BytesStr>,
+    pub protocol: Option<Protocol>,
+
+    // Response
+    pub status: Option<StatusCode>,
+}
+
+#[derive(Debug)]
+pub struct Iter {
+    /// Pseudo headers
+    pseudo: Option<Pseudo>,
+
+    /// Header fields
+    fields: header::IntoIter<HeaderValue>,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+struct HeaderBlock {
+    /// The decoded header fields
+    fields: HeaderMap,
+
+    /// Precomputed size of all of our header fields, for perf reasons
+    field_size: usize,
+
+    /// Set to true if decoding went over the max header list size.
+    is_over_size: bool,
+
+    /// Pseudo headers; these are broken out because they must be sent as part
+    /// of the headers frame.
+    pseudo: Pseudo,
+}
+
+#[derive(Debug)]
+struct EncodingHeaderBlock {
+    hpack: Bytes,
+}
+
+const END_STREAM: u8 = 0x1;
+const END_HEADERS: u8 = 0x4;
+const PADDED: u8 = 0x8;
+const PRIORITY: u8 = 0x20;
+const ALL: u8 = END_STREAM | END_HEADERS | PADDED | PRIORITY;
+
+// ===== impl Headers =====
+
+impl Headers {
+    /// Create a new HEADERS frame
+    pub fn new(stream_id: StreamId, pseudo: Pseudo, fields: HeaderMap) -> Self {
+        Headers {
+            stream_id,
+            stream_dep: None,
+            header_block: HeaderBlock {
+                field_size: calculate_headermap_size(&fields),
+                fields,
+                is_over_size: false,
+                pseudo,
+            },
+            flags: HeadersFlag::default(),
+        }
+    }
+
+    pub fn trailers(stream_id: StreamId, fields: HeaderMap) -> Self {
+        let mut flags = HeadersFlag::default();
+        flags.set_end_stream();
+
+        Headers {
+            stream_id,
+            stream_dep: None,
+            header_block: HeaderBlock {
+                field_size: calculate_headermap_size(&fields),
+                fields,
+                is_over_size: false,
+                pseudo: Pseudo::default(),
+            },
+            flags,
+        }
+    }
+
+    /// Loads the header frame but doesn't actually do HPACK decoding.
+    ///
+    /// HPACK decoding is done in the `load_hpack` step.
+    pub fn load(head: Head, mut src: BytesMut) -> Result<(Self, BytesMut), Error> {
+        let flags = HeadersFlag(head.flag());
+        let mut pad = 0;
+
+        tracing::trace!("loading headers; flags={:?}", flags);
+
+        if head.stream_id().is_zero() {
+            return Err(Error::InvalidStreamId);
+        }
+
+        // Read the padding length
+        if flags.is_padded() {
+            if src.is_empty() {
+                return Err(Error::MalformedMessage);
+            }
+            pad = src[0] as usize;
+
+            // Drop the padding
+            let _ = src.split_to(1);
+        }
+
+        // Read the stream dependency
+        let stream_dep = if flags.is_priority() {
+            if src.len() < 5 {
+                return Err(Error::MalformedMessage);
+            }
+            let stream_dep = StreamDependency::load(&src[..5])?;
+
+            if stream_dep.dependency_id() == head.stream_id() {
+                return Err(Error::InvalidDependencyId);
+            }
+
+            // Drop the next 5 bytes
+            let _ = src.split_to(5);
+
+            Some(stream_dep)
+        } else {
+            None
+        };
+
+        if pad > 0 {
+            if pad > src.len() {
+                return Err(Error::TooMuchPadding);
+            }
+
+            let len = src.len() - pad;
+            src.truncate(len);
+        }
+
+        let headers = Headers {
+            stream_id: head.stream_id(),
+            stream_dep,
+            header_block: HeaderBlock {
+                fields: HeaderMap::new(),
+                field_size: 0,
+                is_over_size: false,
+                pseudo: Pseudo::default(),
+            },
+            flags,
+        };
+
+        Ok((headers, src))
+    }
+
+    pub fn load_hpack(
+        &mut self,
+        src: &mut BytesMut,
+        max_header_list_size: usize,
+        decoder: &mut hpack::Decoder,
+    ) -> Result<(), Error> {
+        self.header_block.load(src, max_header_list_size, decoder)
+    }
+
+    pub fn stream_id(&self) -> StreamId {
+        self.stream_id
+    }
+
+    pub fn is_end_headers(&self) -> bool {
+        self.flags.is_end_headers()
+    }
+
+    pub fn set_end_headers(&mut self) {
+        self.flags.set_end_headers();
+    }
+
+    pub fn is_end_stream(&self) -> bool {
+        self.flags.is_end_stream()
+    }
+
+    pub fn set_end_stream(&mut self) {
+        self.flags.set_end_stream()
+    }
+
+    pub fn is_over_size(&self) -> bool {
+        self.header_block.is_over_size
+    }
+
+    pub fn into_parts(self) -> (Pseudo, HeaderMap) {
+        (self.header_block.pseudo, self.header_block.fields)
+    }
+
+    #[cfg(feature = "unstable")]
+    pub fn pseudo_mut(&mut self) -> &mut Pseudo {
+        &mut self.header_block.pseudo
+    }
+
+    /// Whether it has status 1xx
+    pub(crate) fn is_informational(&self) -> bool {
+        self.header_block.pseudo.is_informational()
+    }
+
+    pub fn fields(&self) -> &HeaderMap {
+        &self.header_block.fields
+    }
+
+    pub fn into_fields(self) -> HeaderMap {
+        self.header_block.fields
+    }
+
+    pub fn encode(
+        self,
+        encoder: &mut hpack::Encoder,
+        dst: &mut EncodeBuf<'_>,
+    ) -> Option<Continuation> {
+        // At this point, the `is_end_headers` flag should always be set
+        debug_assert!(self.flags.is_end_headers());
+
+        // Get the HEADERS frame head
+        let head = self.head();
+
+        self.header_block
+            .into_encoding(encoder)
+            .encode(&head, dst, |_| {})
+    }
+
+    fn head(&self) -> Head {
+        Head::new(Kind::Headers, self.flags.into(), self.stream_id)
+    }
+}
+
+impl<T> From<Headers> for Frame<T> {
+    fn from(src: Headers) -> Self {
+        Frame::Headers(src)
+    }
+}
+
+impl fmt::Debug for Headers {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut builder = f.debug_struct("Headers");
+        builder
+            .field("stream_id", &self.stream_id)
+            .field("flags", &self.flags);
+
+        if let Some(ref protocol) = self.header_block.pseudo.protocol {
+            builder.field("protocol", protocol);
+        }
+
+        if let Some(ref dep) = self.stream_dep {
+            builder.field("stream_dep", dep);
+        }
+
+        // `fields` and `pseudo` purposefully not included
+        builder.finish()
+    }
+}
+
+// ===== util =====
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct ParseU64Error;
+
+pub fn parse_u64(src: &[u8]) -> Result<u64, ParseU64Error> {
+    if src.len() > 19 {
+        // More than 19 digits risks overflowing a u64.
+        return Err(ParseU64Error);
+    }
+
+    let mut ret = 0;
+
+    for &d in src {
+        if d < b'0' || d > b'9' {
+            return Err(ParseU64Error);
+        }
+
+        ret *= 10;
+        ret += (d - b'0') as u64;
+    }
+
+    Ok(ret)
+}
+
+// ===== impl PushPromise =====
+
+#[derive(Debug)]
+pub enum PushPromiseHeaderError {
+    InvalidContentLength(Result<u64, ParseU64Error>),
+    NotSafeAndCacheable,
+}
+
+impl PushPromise {
+    pub fn new(
+        stream_id: StreamId,
+        promised_id: StreamId,
+        pseudo: Pseudo,
+        fields: HeaderMap,
+    ) -> Self {
+        PushPromise {
+            flags: PushPromiseFlag::default(),
+            header_block: HeaderBlock {
+                field_size: calculate_headermap_size(&fields),
+                fields,
+                is_over_size: false,
+                pseudo,
+            },
+            promised_id,
+            stream_id,
+        }
+    }
+
+    pub fn validate_request(req: &Request<()>) -> Result<(), PushPromiseHeaderError> {
+        use PushPromiseHeaderError::*;
+        // The spec has some requirements for promised request headers
+        // [https://httpwg.org/specs/rfc7540.html#PushRequests]
+
+        // A promised request "that indicates the presence of a request body
+        // MUST reset the promised stream with a stream error"
+        if let Some(content_length) = req.headers().get(header::CONTENT_LENGTH) {
+            let parsed_length = parse_u64(content_length.as_bytes());
+            if parsed_length != Ok(0) {
+                return Err(InvalidContentLength(parsed_length));
+            }
+        }
+        // "The server MUST include a method in the :method pseudo-header field
+        // that is safe and cacheable"
+        if !Self::safe_and_cacheable(req.method()) {
+            return Err(NotSafeAndCacheable);
+        }
+
+        Ok(())
+    }
+
+    fn safe_and_cacheable(method: &Method) -> bool {
+        // Cacheable: https://httpwg.org/specs/rfc7231.html#cacheable.methods
+        // Safe: https://httpwg.org/specs/rfc7231.html#safe.methods
+        method == Method::GET || method == Method::HEAD
+    }
+
+    pub fn fields(&self) -> &HeaderMap {
+        &self.header_block.fields
+    }
+
+    #[cfg(feature = "unstable")]
+    pub fn into_fields(self) -> HeaderMap {
+        self.header_block.fields
+    }
+
+    /// Loads the push promise frame but doesn't actually do HPACK decoding.
+    ///
+    /// HPACK decoding is done in the `load_hpack` step.
+    pub fn load(head: Head, mut src: BytesMut) -> Result<(Self, BytesMut), Error> {
+        let flags = PushPromiseFlag(head.flag());
+        let mut pad = 0;
+
+        if head.stream_id().is_zero() {
+            return Err(Error::InvalidStreamId);
+        }
+
+        // Read the padding length
+        if flags.is_padded() {
+            if src.is_empty() {
+                return Err(Error::MalformedMessage);
+            }
+
+            // TODO: Ensure payload is sized correctly
+            pad = src[0] as usize;
+
+            // Drop the padding
+            let _ = src.split_to(1);
+        }
+
+        if src.len() < 5 {
+            return Err(Error::MalformedMessage);
+        }
+
+        let (promised_id, _) = StreamId::parse(&src[..4]);
+        // Drop promised_id bytes
+        let _ = src.split_to(4);
+
+        if pad > 0 {
+            if pad > src.len() {
+                return Err(Error::TooMuchPadding);
+            }
+
+            let len = src.len() - pad;
+            src.truncate(len);
+        }
+
+        let frame = PushPromise {
+            flags,
+            header_block: HeaderBlock {
+                fields: HeaderMap::new(),
+                field_size: 0,
+                is_over_size: false,
+                pseudo: Pseudo::default(),
+            },
+            promised_id,
+            stream_id: head.stream_id(),
+        };
+        Ok((frame, src))
+    }
+
+    pub fn load_hpack(
+        &mut self,
+        src: &mut BytesMut,
+        max_header_list_size: usize,
+        decoder: &mut hpack::Decoder,
+    ) -> Result<(), Error> {
+        self.header_block.load(src, max_header_list_size, decoder)
+    }
+
+    pub fn stream_id(&self) -> StreamId {
+        self.stream_id
+    }
+
+    pub fn promised_id(&self) -> StreamId {
+        self.promised_id
+    }
+
+    pub fn is_end_headers(&self) -> bool {
+        self.flags.is_end_headers()
+    }
+
+    pub fn set_end_headers(&mut self) {
+        self.flags.set_end_headers();
+    }
+
+    pub fn is_over_size(&self) -> bool {
+        self.header_block.is_over_size
+    }
+
+    pub fn encode(
+        self,
+        encoder: &mut hpack::Encoder,
+        dst: &mut EncodeBuf<'_>,
+    ) -> Option<Continuation> {
+        // At this point, the `is_end_headers` flag should always be set
+        debug_assert!(self.flags.is_end_headers());
+
+        let head = self.head();
+        let promised_id = self.promised_id;
+
+        self.header_block
+            .into_encoding(encoder)
+            .encode(&head, dst, |dst| {
+                dst.put_u32(promised_id.into());
+            })
+    }
+
+    fn head(&self) -> Head {
+        Head::new(Kind::PushPromise, self.flags.into(), self.stream_id)
+    }
+
+    /// Consume `self`, returning the parts of the frame
+    pub fn into_parts(self) -> (Pseudo, HeaderMap) {
+        (self.header_block.pseudo, self.header_block.fields)
+    }
+}
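+
+// Illustrative sketch (not part of upstream h2) of checking promised requests
+// against the rules enforced by `validate_request` above:
+//
+//     // GET with no body is safe, cacheable, and has no content: accepted.
+//     let ok = Request::get("https://example.com/").body(()).unwrap();
+//     assert!(PushPromise::validate_request(&ok).is_ok());
+//
+//     // POST is neither safe nor cacheable: rejected.
+//     let bad = Request::post("https://example.com/").body(()).unwrap();
+//     assert!(PushPromise::validate_request(&bad).is_err());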
+
+impl<T> From<PushPromise> for Frame<T> {
+    fn from(src: PushPromise) -> Self {
+        Frame::PushPromise(src)
+    }
+}
+
+impl fmt::Debug for PushPromise {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("PushPromise")
+            .field("stream_id", &self.stream_id)
+            .field("promised_id", &self.promised_id)
+            .field("flags", &self.flags)
+            // `fields` and `pseudo` purposefully not included
+            .finish()
+    }
+}
+
+// ===== impl Continuation =====
+
+impl Continuation {
+    fn head(&self) -> Head {
+        Head::new(Kind::Continuation, END_HEADERS, self.stream_id)
+    }
+
+    pub fn encode(self, dst: &mut EncodeBuf<'_>) -> Option<Continuation> {
+        // Get the CONTINUATION frame head
+        let head = self.head();
+
+        self.header_block.encode(&head, dst, |_| {})
+    }
+}
+
+// ===== impl Pseudo =====
+
+impl Pseudo {
+    pub fn request(method: Method, uri: Uri, protocol: Option<Protocol>) -> Self {
+        let parts = uri::Parts::from(uri);
+
+        let mut path = parts
+            .path_and_query
+            .map(|v| BytesStr::from(v.as_str()))
+            .unwrap_or(BytesStr::from_static(""));
+
+        match method {
+            Method::OPTIONS | Method::CONNECT => {}
+            _ if path.is_empty() => {
+                path = BytesStr::from_static("/");
+            }
+            _ => {}
+        }
+
+        let mut pseudo = Pseudo {
+            method: Some(method),
+            scheme: None,
+            authority: None,
+            path: Some(path).filter(|p| !p.is_empty()),
+            protocol,
+            status: None,
+        };
+
+        // If the URI includes a scheme component, add it to the pseudo headers
+        //
+        // TODO: Scheme must be set...
+        if let Some(scheme) = parts.scheme {
+            pseudo.set_scheme(scheme);
+        }
+
+        // If the URI includes an authority component, add it to the pseudo
+        // headers
+        if let Some(authority) = parts.authority {
+            pseudo.set_authority(BytesStr::from(authority.as_str()));
+        }
+
+        pseudo
+    }
+
+    pub fn response(status: StatusCode) -> Self {
+        Pseudo {
+            method: None,
+            scheme: None,
+            authority: None,
+            path: None,
+            protocol: None,
+            status: Some(status),
+        }
+    }
+
+    #[cfg(feature = "unstable")]
+    pub fn set_status(&mut self, value: StatusCode) {
+        self.status = Some(value);
+    }
+
+    pub fn set_scheme(&mut self, scheme: uri::Scheme) {
+        let bytes_str = match scheme.as_str() {
+            "http" => BytesStr::from_static("http"),
+            "https" => BytesStr::from_static("https"),
+            s => BytesStr::from(s),
+        };
+        self.scheme = Some(bytes_str);
+    }
+
+    #[cfg(feature = "unstable")]
+    pub fn set_protocol(&mut self, protocol: Protocol) {
+        self.protocol = Some(protocol);
+    }
+
+    pub fn set_authority(&mut self, authority: BytesStr) {
+        self.authority = Some(authority);
+    }
+
+    /// Whether it has status 1xx
+    pub(crate) fn is_informational(&self) -> bool {
+        self.status
+            .map_or(false, |status| status.is_informational())
+    }
+}
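+
+// Illustrative sketch (not part of upstream h2) of the mapping from a request
+// line to the pseudo headers built by `Pseudo::request` above:
+//
+//     let uri: Uri = "https://example.com/index.html?q=1".parse().unwrap();
+//     let pseudo = Pseudo::request(Method::GET, uri, None);
+//     // method = GET, scheme = "https", authority = "example.com",
+//     // path = "/index.html?q=1", protocol = None, status = None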
+
+// ===== impl EncodingHeaderBlock =====
+
+impl EncodingHeaderBlock {
+    fn encode<F>(mut self, head: &Head, dst: &mut EncodeBuf<'_>, f: F) -> Option<Continuation>
+    where
+        F: FnOnce(&mut EncodeBuf<'_>),
+    {
+        let head_pos = dst.get_ref().len();
+
+        // At this point, we don't know how big the h2 frame will be.
+        // So, we write the head with length 0, then write the body, and
+        // finally write the length once we know the size.
+        head.encode(0, dst);
+
+        let payload_pos = dst.get_ref().len();
+
+        f(dst);
+
+        // Now, encode the header payload
+        let continuation = if self.hpack.len() > dst.remaining_mut() {
+            dst.put_slice(&self.hpack.split_to(dst.remaining_mut()));
+
+            Some(Continuation {
+                stream_id: head.stream_id(),
+                header_block: self,
+            })
+        } else {
+            dst.put_slice(&self.hpack);
+
+            None
+        };
+
+        // Compute the header block length
+        let payload_len = (dst.get_ref().len() - payload_pos) as u64;
+
+        // Write the frame length
+        let payload_len_be = payload_len.to_be_bytes();
+        assert!(payload_len_be[0..5].iter().all(|b| *b == 0));
+        (dst.get_mut()[head_pos..head_pos + 3]).copy_from_slice(&payload_len_be[5..]);
+
+        if continuation.is_some() {
+            // There will be continuation frames, so the `is_end_headers` flag
+            // must be unset
+            debug_assert!(dst.get_ref()[head_pos + 4] & END_HEADERS == END_HEADERS);
+
+            dst.get_mut()[head_pos + 4] -= END_HEADERS;
+        }
+
+        continuation
+    }
+}
+
+// ===== impl Iter =====
+
+impl Iterator for Iter {
+    type Item = hpack::Header<Option<HeaderName>>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        use crate::hpack::Header::*;
+
+        if let Some(ref mut pseudo) = self.pseudo {
+            if let Some(method) = pseudo.method.take() {
+                return Some(Method(method));
+            }
+
+            if let Some(scheme) = pseudo.scheme.take() {
+                return Some(Scheme(scheme));
+            }
+
+            if let Some(authority) = pseudo.authority.take() {
+                return Some(Authority(authority));
+            }
+
+            if let Some(path) = pseudo.path.take() {
+                return Some(Path(path));
+            }
+
+            if let Some(protocol) = pseudo.protocol.take() {
+                return Some(Protocol(protocol));
+            }
+
+            if let Some(status) = pseudo.status.take() {
+                return Some(Status(status));
+            }
+        }
+
+        self.pseudo = None;
+
+        self.fields
+            .next()
+            .map(|(name, value)| Field { name, value })
+    }
+}
+
+// ===== impl HeadersFlag =====
+
+impl HeadersFlag {
+    pub fn empty() -> HeadersFlag {
+        HeadersFlag(0)
+    }
+
+    pub fn load(bits: u8) -> HeadersFlag {
+        HeadersFlag(bits & ALL)
+    }
+
+    pub fn is_end_stream(&self) -> bool {
+        self.0 & END_STREAM == END_STREAM
+    }
+
+    pub fn set_end_stream(&mut self) {
+        self.0 |= END_STREAM;
+    }
+
+    pub fn is_end_headers(&self) -> bool {
+        self.0 & END_HEADERS == END_HEADERS
+    }
+
+    pub fn set_end_headers(&mut self) {
+        self.0 |= END_HEADERS;
+    }
+
+    pub fn is_padded(&self) -> bool {
+        self.0 & PADDED == PADDED
+    }
+
+    pub fn is_priority(&self) -> bool {
+        self.0 & PRIORITY == PRIORITY
+    }
+}
+
+impl Default for HeadersFlag {
+    /// Returns a `HeadersFlag` value with `END_HEADERS` set.
+    fn default() -> Self {
+        HeadersFlag(END_HEADERS)
+    }
+}
+
+impl From<HeadersFlag> for u8 {
+    fn from(src: HeadersFlag) -> u8 {
+        src.0
+    }
+}
+
+impl fmt::Debug for HeadersFlag {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        util::debug_flags(fmt, self.0)
+            .flag_if(self.is_end_headers(), "END_HEADERS")
+            .flag_if(self.is_end_stream(), "END_STREAM")
+            .flag_if(self.is_padded(), "PADDED")
+            .flag_if(self.is_priority(), "PRIORITY")
+            .finish()
+    }
+}
+
+// ===== impl PushPromiseFlag =====
+
+impl PushPromiseFlag {
+    pub fn empty() -> PushPromiseFlag {
+        PushPromiseFlag(0)
+    }
+
+    pub fn load(bits: u8) -> PushPromiseFlag {
+        PushPromiseFlag(bits & ALL)
+    }
+
+    pub fn is_end_headers(&self) -> bool {
+        self.0 & END_HEADERS == END_HEADERS
+    }
+
+    pub fn set_end_headers(&mut self) {
+        self.0 |= END_HEADERS;
+    }
+
+    pub fn is_padded(&self) -> bool {
+        self.0 & PADDED == PADDED
+    }
+}
+
+impl Default for PushPromiseFlag {
+    /// Returns a `PushPromiseFlag` value with `END_HEADERS` set.
+    fn default() -> Self {
+        PushPromiseFlag(END_HEADERS)
+    }
+}
+
+impl From<PushPromiseFlag> for u8 {
+    fn from(src: PushPromiseFlag) -> u8 {
+        src.0
+    }
+}
+
+impl fmt::Debug for PushPromiseFlag {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        util::debug_flags(fmt, self.0)
+            .flag_if(self.is_end_headers(), "END_HEADERS")
+            .flag_if(self.is_padded(), "PADDED")
+            .finish()
+    }
+}
+
+// ===== HeaderBlock =====
+
+impl HeaderBlock {
+    fn load(
+        &mut self,
+        src: &mut BytesMut,
+        max_header_list_size: usize,
+        decoder: &mut hpack::Decoder,
+    ) -> Result<(), Error> {
+        let mut reg = !self.fields.is_empty();
+        let mut malformed = false;
+        let mut headers_size = self.calculate_header_list_size();
+
+        macro_rules! set_pseudo {
+            ($field:ident, $val:expr) => {{
+                if reg {
+                    tracing::trace!("load_hpack; header malformed -- pseudo not at head of block");
+                    malformed = true;
+                } else if self.pseudo.$field.is_some() {
+                    tracing::trace!("load_hpack; header malformed -- repeated pseudo");
+                    malformed = true;
+                } else {
+                    let __val = $val;
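+                    // `stringify!($field).len() + 1` accounts for the leading ':'
+                    // of the pseudo-header name (e.g. ":path" for the `path` field).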
+                    headers_size +=
+                        decoded_header_size(stringify!($field).len() + 1, __val.as_str().len());
+                    if headers_size < max_header_list_size {
+                        self.pseudo.$field = Some(__val);
+                    } else if !self.is_over_size {
+                        tracing::trace!("load_hpack; header list size over max");
+                        self.is_over_size = true;
+                    }
+                }
+            }};
+        }
+
+        let mut cursor = Cursor::new(src);
+
+        // If the header frame is malformed, we still have to continue decoding
+        // the headers. A malformed header frame is a stream level error, but
+        // the hpack state is connection level. In order to maintain correct
+        // state for other streams, the hpack decoding process must complete.
+        let res = decoder.decode(&mut cursor, |header| {
+            use crate::hpack::Header::*;
+
+            match header {
+                Field { name, value } => {
+                    // Connection level header fields are not supported and must
+                    // result in a protocol error.
+
+                    if name == header::CONNECTION
+                        || name == header::TRANSFER_ENCODING
+                        || name == header::UPGRADE
+                        || name == "keep-alive"
+                        || name == "proxy-connection"
+                    {
+                        tracing::trace!("load_hpack; connection level header");
+                        malformed = true;
+                    } else if name == header::TE && value != "trailers" {
+                        tracing::trace!(
+                            "load_hpack; TE header not set to trailers; val={:?}",
+                            value
+                        );
+                        malformed = true;
+                    } else {
+                        reg = true;
+
+                        headers_size += decoded_header_size(name.as_str().len(), value.len());
+                        if headers_size < max_header_list_size {
+                            self.field_size +=
+                                decoded_header_size(name.as_str().len(), value.len());
+                            self.fields.append(name, value);
+                        } else if !self.is_over_size {
+                            tracing::trace!("load_hpack; header list size over max");
+                            self.is_over_size = true;
+                        }
+                    }
+                }
+                Authority(v) => set_pseudo!(authority, v),
+                Method(v) => set_pseudo!(method, v),
+                Scheme(v) => set_pseudo!(scheme, v),
+                Path(v) => set_pseudo!(path, v),
+                Protocol(v) => set_pseudo!(protocol, v),
+                Status(v) => set_pseudo!(status, v),
+            }
+        });
+
+        if let Err(e) = res {
+            tracing::trace!("hpack decoding error; err={:?}", e);
+            return Err(e.into());
+        }
+
+        if malformed {
+            tracing::trace!("malformed message");
+            return Err(Error::MalformedMessage);
+        }
+
+        Ok(())
+    }
+
+    fn into_encoding(self, encoder: &mut hpack::Encoder) -> EncodingHeaderBlock {
+        let mut hpack = BytesMut::new();
+        let headers = Iter {
+            pseudo: Some(self.pseudo),
+            fields: self.fields.into_iter(),
+        };
+
+        encoder.encode(headers, &mut hpack);
+
+        EncodingHeaderBlock {
+            hpack: hpack.freeze(),
+        }
+    }
+
+    /// Calculates the size of the currently decoded header list.
+    ///
+    /// According to http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE
+    ///
+    /// > The value is based on the uncompressed size of header fields,
+    /// > including the length of the name and value in octets plus an
+    /// > overhead of 32 octets for each header field.
+    fn calculate_header_list_size(&self) -> usize {
+        macro_rules! pseudo_size {
+            ($name:ident) => {{
+                self.pseudo
+                    .$name
+                    .as_ref()
+                    .map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len()))
+                    .unwrap_or(0)
+            }};
+        }
+
+        pseudo_size!(method)
+            + pseudo_size!(scheme)
+            + pseudo_size!(status)
+            + pseudo_size!(authority)
+            + pseudo_size!(path)
+            + self.field_size
+    }
+}
+
+fn calculate_headermap_size(map: &HeaderMap) -> usize {
+    map.iter()
+        .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len()))
+        .sum::<usize>()
+}
+
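+// Per RFC 7540 §6.5.2 (SETTINGS_MAX_HEADER_LIST_SIZE), each header field is
+// counted as the length of its name plus the length of its value plus 32
+// octets of overhead; for example, ":path: /index.html" counts as
+// 5 + 11 + 32 = 48 octets.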
+fn decoded_header_size(name: usize, value: usize) -> usize {
+    name + value + 32
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::frame;
+    use crate::hpack::{huffman, Encoder};
+
+    #[test]
+    fn test_nameless_header_at_resume() {
+        let mut encoder = Encoder::default();
+        let mut dst = BytesMut::new();
+
+        let headers = Headers::new(
+            StreamId::ZERO,
+            Default::default(),
+            HeaderMap::from_iter(vec![
+                (
+                    HeaderName::from_static("hello"),
+                    HeaderValue::from_static("world"),
+                ),
+                (
+                    HeaderName::from_static("hello"),
+                    HeaderValue::from_static("zomg"),
+                ),
+                (
+                    HeaderName::from_static("hello"),
+                    HeaderValue::from_static("sup"),
+                ),
+            ]),
+        );
+
+        let continuation = headers
+            .encode(&mut encoder, &mut (&mut dst).limit(frame::HEADER_LEN + 8))
+            .unwrap();
+
+        assert_eq!(17, dst.len());
+        assert_eq!([0, 0, 8, 1, 0, 0, 0, 0, 0], &dst[0..9]);
+        assert_eq!(&[0x40, 0x80 | 4], &dst[9..11]);
+        assert_eq!("hello", huff_decode(&dst[11..15]));
+        assert_eq!(0x80 | 4, dst[15]);
+
+        let mut world = dst[16..17].to_owned();
+
+        dst.clear();
+
+        assert!(continuation
+            .encode(&mut (&mut dst).limit(frame::HEADER_LEN + 16))
+            .is_none());
+
+        world.extend_from_slice(&dst[9..12]);
+        assert_eq!("world", huff_decode(&world));
+
+        assert_eq!(24, dst.len());
+        assert_eq!([0, 0, 15, 9, 4, 0, 0, 0, 0], &dst[0..9]);
+
+        // Next is not indexed
+        assert_eq!(&[15, 47, 0x80 | 3], &dst[12..15]);
+        assert_eq!("zomg", huff_decode(&dst[15..18]));
+        assert_eq!(&[15, 47, 0x80 | 3], &dst[18..21]);
+        assert_eq!("sup", huff_decode(&dst[21..]));
+    }
+
+    fn huff_decode(src: &[u8]) -> BytesMut {
+        let mut buf = BytesMut::new();
+        huffman::decode(src, &mut buf).unwrap()
+    }
+}
diff --git a/src/frame/mod.rs b/src/frame/mod.rs
new file mode 100644
index 0000000..0e8e703
--- /dev/null
+++ b/src/frame/mod.rs
@@ -0,0 +1,171 @@
+use crate::hpack;
+
+use bytes::Bytes;
+
+use std::fmt;
+
+/// A helper macro that unpacks a sequence of 4 bytes from the given buffer,
+/// starting at the given offset, into the given integer type. The integer
+/// type must be able to hold at least 4 bytes.
+///
+/// # Examples
+///
+/// ```ignore
+/// # // We ignore this doctest because the macro is not exported.
+/// let buf: [u8; 4] = [0, 0, 0, 1];
+/// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32));
+/// ```
+macro_rules! unpack_octets_4 {
+    // TODO: Get rid of this macro
+    ($buf:expr, $offset:expr, $tip:ty) => {
+        (($buf[$offset + 0] as $tip) << 24)
+            | (($buf[$offset + 1] as $tip) << 16)
+            | (($buf[$offset + 2] as $tip) << 8)
+            | (($buf[$offset + 3] as $tip) << 0)
+    };
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn test_unpack_octets_4() {
+        let buf: [u8; 4] = [0, 0, 0, 1];
+        assert_eq!(1u32, unpack_octets_4!(buf, 0, u32));
+    }
+}
+
+mod data;
+mod go_away;
+mod head;
+mod headers;
+mod ping;
+mod priority;
+mod reason;
+mod reset;
+mod settings;
+mod stream_id;
+mod util;
+mod window_update;
+
+pub use self::data::Data;
+pub use self::go_away::GoAway;
+pub use self::head::{Head, Kind};
+pub use self::headers::{
+    parse_u64, Continuation, Headers, Pseudo, PushPromise, PushPromiseHeaderError,
+};
+pub use self::ping::Ping;
+pub use self::priority::{Priority, StreamDependency};
+pub use self::reason::Reason;
+pub use self::reset::Reset;
+pub use self::settings::Settings;
+pub use self::stream_id::{StreamId, StreamIdOverflow};
+pub use self::window_update::WindowUpdate;
+
+#[cfg(feature = "unstable")]
+pub use crate::hpack::BytesStr;
+
+// Re-export some constants
+
+pub use self::settings::{
+    DEFAULT_INITIAL_WINDOW_SIZE, DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE,
+    MAX_MAX_FRAME_SIZE,
+};
+
+pub type FrameSize = u32;
+
+pub const HEADER_LEN: usize = 9;
+
+#[derive(Eq, PartialEq)]
+pub enum Frame<T = Bytes> {
+    Data(Data<T>),
+    Headers(Headers),
+    Priority(Priority),
+    PushPromise(PushPromise),
+    Settings(Settings),
+    Ping(Ping),
+    GoAway(GoAway),
+    WindowUpdate(WindowUpdate),
+    Reset(Reset),
+}
+
+impl<T> Frame<T> {
+    pub fn map<F, U>(self, f: F) -> Frame<U>
+    where
+        F: FnOnce(T) -> U,
+    {
+        use self::Frame::*;
+
+        match self {
+            Data(frame) => frame.map(f).into(),
+            Headers(frame) => frame.into(),
+            Priority(frame) => frame.into(),
+            PushPromise(frame) => frame.into(),
+            Settings(frame) => frame.into(),
+            Ping(frame) => frame.into(),
+            GoAway(frame) => frame.into(),
+            WindowUpdate(frame) => frame.into(),
+            Reset(frame) => frame.into(),
+        }
+    }
+}
+
+impl<T> fmt::Debug for Frame<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        use self::Frame::*;
+
+        match *self {
+            Data(ref frame) => fmt::Debug::fmt(frame, fmt),
+            Headers(ref frame) => fmt::Debug::fmt(frame, fmt),
+            Priority(ref frame) => fmt::Debug::fmt(frame, fmt),
+            PushPromise(ref frame) => fmt::Debug::fmt(frame, fmt),
+            Settings(ref frame) => fmt::Debug::fmt(frame, fmt),
+            Ping(ref frame) => fmt::Debug::fmt(frame, fmt),
+            GoAway(ref frame) => fmt::Debug::fmt(frame, fmt),
+            WindowUpdate(ref frame) => fmt::Debug::fmt(frame, fmt),
+            Reset(ref frame) => fmt::Debug::fmt(frame, fmt),
+        }
+    }
+}
+
+/// Errors that can occur during parsing an HTTP/2 frame.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Error {
+    /// A length value other than 8 was set on a PING message.
+    BadFrameSize,
+
+    /// The padding length was larger than the frame-header-specified
+    /// length of the payload.
+    TooMuchPadding,
+
+    /// An invalid setting value was provided
+    InvalidSettingValue,
+
+    /// An invalid window update value
+    InvalidWindowUpdateValue,
+
+    /// The payload length specified by the frame header was not the
+    /// value necessary for the specific frame type.
+    InvalidPayloadLength,
+
+    /// Received a payload with an ACK settings frame
+    InvalidPayloadAckSettings,
+
+    /// An invalid stream identifier was provided.
+    ///
+    /// This is returned if a SETTINGS or PING frame is received with a stream
+    /// identifier other than zero.
+    InvalidStreamId,
+
+    /// A request or response is malformed.
+    MalformedMessage,
+
+    /// An invalid stream dependency ID was provided
+    ///
+    /// This is returned if a HEADERS or PRIORITY frame is received with an
+    /// invalid stream identifier.
+    InvalidDependencyId,
+
+    /// Failed to perform HPACK decoding
+    Hpack(hpack::DecoderError),
+}
diff --git a/src/frame/ping.rs b/src/frame/ping.rs
new file mode 100644
index 0000000..241d06e
--- /dev/null
+++ b/src/frame/ping.rs
@@ -0,0 +1,102 @@
+use crate::frame::{Error, Frame, Head, Kind, StreamId};
+use bytes::BufMut;
+
+const ACK_FLAG: u8 = 0x1;
+
+pub type Payload = [u8; 8];
+
+#[derive(Debug, Eq, PartialEq)]
+pub struct Ping {
+    ack: bool,
+    payload: Payload,
+}
+
+// These are just randomly generated bytes. We use something besides all
+// zeroes to distinguish these specific PINGs from any other.
+const SHUTDOWN_PAYLOAD: Payload = [0x0b, 0x7b, 0xa2, 0xf0, 0x8b, 0x9b, 0xfe, 0x54];
+const USER_PAYLOAD: Payload = [0x3b, 0x7c, 0xdb, 0x7a, 0x0b, 0x87, 0x16, 0xb4];
+
+impl Ping {
+    #[cfg(feature = "unstable")]
+    pub const SHUTDOWN: Payload = SHUTDOWN_PAYLOAD;
+
+    #[cfg(not(feature = "unstable"))]
+    pub(crate) const SHUTDOWN: Payload = SHUTDOWN_PAYLOAD;
+
+    #[cfg(feature = "unstable")]
+    pub const USER: Payload = USER_PAYLOAD;
+
+    #[cfg(not(feature = "unstable"))]
+    pub(crate) const USER: Payload = USER_PAYLOAD;
+
+    pub fn new(payload: Payload) -> Ping {
+        Ping {
+            ack: false,
+            payload,
+        }
+    }
+
+    pub fn pong(payload: Payload) -> Ping {
+        Ping { ack: true, payload }
+    }
+
+    pub fn is_ack(&self) -> bool {
+        self.ack
+    }
+
+    pub fn payload(&self) -> &Payload {
+        &self.payload
+    }
+
+    pub fn into_payload(self) -> Payload {
+        self.payload
+    }
+
+    /// Builds a `Ping` frame from a raw frame.
+    pub fn load(head: Head, bytes: &[u8]) -> Result<Ping, Error> {
+        debug_assert_eq!(head.kind(), crate::frame::Kind::Ping);
+
+        // PING frames are not associated with any individual stream. If a PING
+        // frame is received with a stream identifier field value other than
+        // 0x0, the recipient MUST respond with a connection error
+        // (Section 5.4.1) of type PROTOCOL_ERROR.
+        if !head.stream_id().is_zero() {
+            return Err(Error::InvalidStreamId);
+        }
+
+        // In addition to the frame header, PING frames MUST contain 8 octets of opaque
+        // data in the payload.
+        if bytes.len() != 8 {
+            return Err(Error::BadFrameSize);
+        }
+
+        let mut payload = [0; 8];
+        payload.copy_from_slice(bytes);
+
+        // The PING frame defines the following flags:
+        //
+        // ACK (0x1): When set, bit 0 indicates that this PING frame is a PING
+        //    response. An endpoint MUST set this flag in PING responses. An
+        //    endpoint MUST NOT respond to PING frames containing this flag.
+        let ack = head.flag() & ACK_FLAG != 0;
+
+        Ok(Ping { ack, payload })
+    }
+
+    pub fn encode<B: BufMut>(&self, dst: &mut B) {
+        let sz = self.payload.len();
+        tracing::trace!("encoding PING; ack={} len={}", self.ack, sz);
+
+        let flags = if self.ack { ACK_FLAG } else { 0 };
+        let head = Head::new(Kind::Ping, flags, StreamId::zero());
+
+        head.encode(sz, dst);
+        dst.put_slice(&self.payload);
+    }
+}
+
+impl<T> From<Ping> for Frame<T> {
+    fn from(src: Ping) -> Frame<T> {
+        Frame::Ping(src)
+    }
+}
diff --git a/src/frame/priority.rs b/src/frame/priority.rs
new file mode 100644
index 0000000..d7d47db
--- /dev/null
+++ b/src/frame/priority.rs
@@ -0,0 +1,72 @@
+use crate::frame::*;
+
+#[derive(Debug, Eq, PartialEq)]
+pub struct Priority {
+    stream_id: StreamId,
+    dependency: StreamDependency,
+}
+
+#[derive(Debug, Eq, PartialEq)]
+pub struct StreamDependency {
+    /// The ID of the stream dependency target
+    dependency_id: StreamId,
+
+    /// The weight for the stream. The value exposed (and set) here is always in
+    /// the range [0, 255], instead of [1, 256] (as defined in section 5.3.2.)
+    /// so that the value fits into a `u8`.
+    weight: u8,
+
+    /// True if the stream dependency is exclusive.
+    is_exclusive: bool,
+}
+
+impl Priority {
+    pub fn load(head: Head, payload: &[u8]) -> Result<Self, Error> {
+        let dependency = StreamDependency::load(payload)?;
+
+        if dependency.dependency_id() == head.stream_id() {
+            return Err(Error::InvalidDependencyId);
+        }
+
+        Ok(Priority {
+            stream_id: head.stream_id(),
+            dependency,
+        })
+    }
+}
+
+impl<B> From<Priority> for Frame<B> {
+    fn from(src: Priority) -> Self {
+        Frame::Priority(src)
+    }
+}
+
+// ===== impl StreamDependency =====
+
+impl StreamDependency {
+    pub fn new(dependency_id: StreamId, weight: u8, is_exclusive: bool) -> Self {
+        StreamDependency {
+            dependency_id,
+            weight,
+            is_exclusive,
+        }
+    }
+
+    pub fn load(src: &[u8]) -> Result<Self, Error> {
+        if src.len() != 5 {
+            return Err(Error::InvalidPayloadLength);
+        }
+
+        // Parse the stream ID and exclusive flag
+        let (dependency_id, is_exclusive) = StreamId::parse(&src[..4]);
+
+        // Read the weight
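+        // (this is the raw wire octet; per RFC 7540 §5.3.2 the effective
+        // weight is this value plus one)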
+        let weight = src[4];
+
+        Ok(StreamDependency::new(dependency_id, weight, is_exclusive))
+    }
+
+    pub fn dependency_id(&self) -> StreamId {
+        self.dependency_id
+    }
+}
diff --git a/src/frame/reason.rs b/src/frame/reason.rs
new file mode 100644
index 0000000..ff5e201
--- /dev/null
+++ b/src/frame/reason.rs
@@ -0,0 +1,134 @@
+use std::fmt;
+
+/// HTTP/2 error codes.
+///
+/// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the
+/// reasons for the stream or connection error. For example,
+/// [`SendStream::send_reset`] takes a `Reason` argument. Also, the `Error` type
+/// may contain a `Reason`.
+///
+/// Error codes share a common code space. Some error codes apply only to
+/// streams, others apply only to connections, and others may apply to either.
+/// See [RFC 7540] for more information.
+///
+/// See [Error Codes in the spec][spec].
+///
+/// [spec]: http://httpwg.org/specs/rfc7540.html#ErrorCodes
+/// [`SendStream::send_reset`]: struct.SendStream.html#method.send_reset
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub struct Reason(u32);
+
+impl Reason {
+    /// The associated condition is not a result of an error.
+    ///
+    /// For example, a GOAWAY might include this code to indicate graceful
+    /// shutdown of a connection.
+    pub const NO_ERROR: Reason = Reason(0);
+    /// The endpoint detected an unspecific protocol error.
+    ///
+    /// This error is for use when a more specific error code is not available.
+    pub const PROTOCOL_ERROR: Reason = Reason(1);
+    /// The endpoint encountered an unexpected internal error.
+    pub const INTERNAL_ERROR: Reason = Reason(2);
+    /// The endpoint detected that its peer violated the flow-control protocol.
+    pub const FLOW_CONTROL_ERROR: Reason = Reason(3);
+    /// The endpoint sent a SETTINGS frame but did not receive a response in
+    /// a timely manner.
+    pub const SETTINGS_TIMEOUT: Reason = Reason(4);
+    /// The endpoint received a frame after a stream was half-closed.
+    pub const STREAM_CLOSED: Reason = Reason(5);
+    /// The endpoint received a frame with an invalid size.
+    pub const FRAME_SIZE_ERROR: Reason = Reason(6);
+    /// The endpoint refused the stream prior to performing any application
+    /// processing.
+    pub const REFUSED_STREAM: Reason = Reason(7);
+    /// Used by the endpoint to indicate that the stream is no longer needed.
+    pub const CANCEL: Reason = Reason(8);
+    /// The endpoint is unable to maintain the header compression context for
+    /// the connection.
+    pub const COMPRESSION_ERROR: Reason = Reason(9);
+    /// The connection established in response to a CONNECT request was reset
+    /// or abnormally closed.
+    pub const CONNECT_ERROR: Reason = Reason(10);
+    /// The endpoint detected that its peer is exhibiting a behavior that might
+    /// be generating excessive load.
+    pub const ENHANCE_YOUR_CALM: Reason = Reason(11);
+    /// The underlying transport has properties that do not meet minimum
+    /// security requirements.
+    pub const INADEQUATE_SECURITY: Reason = Reason(12);
+    /// The endpoint requires that HTTP/1.1 be used instead of HTTP/2.
+    pub const HTTP_1_1_REQUIRED: Reason = Reason(13);
+
+    /// Get a string description of the error code.
+    pub fn description(&self) -> &str {
+        match self.0 {
+            0 => "not a result of an error",
+            1 => "unspecific protocol error detected",
+            2 => "unexpected internal error encountered",
+            3 => "flow-control protocol violated",
+            4 => "settings ACK not received in timely manner",
+            5 => "received frame when stream half-closed",
+            6 => "frame with invalid size",
+            7 => "refused stream before processing any application logic",
+            8 => "stream no longer needed",
+            9 => "unable to maintain the header compression context",
+            10 => {
+                "connection established in response to a CONNECT request was reset or abnormally \
+                 closed"
+            }
+            11 => "detected excessive load generating behavior",
+            12 => "security properties do not meet minimum requirements",
+            13 => "endpoint requires HTTP/1.1",
+            _ => "unknown reason",
+        }
+    }
+}
+
+impl From<u32> for Reason {
+    fn from(src: u32) -> Reason {
+        Reason(src)
+    }
+}
+
+impl From<Reason> for u32 {
+    fn from(src: Reason) -> u32 {
+        src.0
+    }
+}
+
+impl fmt::Debug for Reason {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let name = match self.0 {
+            0 => "NO_ERROR",
+            1 => "PROTOCOL_ERROR",
+            2 => "INTERNAL_ERROR",
+            3 => "FLOW_CONTROL_ERROR",
+            4 => "SETTINGS_TIMEOUT",
+            5 => "STREAM_CLOSED",
+            6 => "FRAME_SIZE_ERROR",
+            7 => "REFUSED_STREAM",
+            8 => "CANCEL",
+            9 => "COMPRESSION_ERROR",
+            10 => "CONNECT_ERROR",
+            11 => "ENHANCE_YOUR_CALM",
+            12 => "INADEQUATE_SECURITY",
+            13 => "HTTP_1_1_REQUIRED",
+            other => return f.debug_tuple("Reason").field(&Hex(other)).finish(),
+        };
+        f.write_str(name)
+    }
+}
+
+struct Hex(u32);
+
+impl fmt::Debug for Hex {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::LowerHex::fmt(&self.0, f)
+    }
+}
+
+impl fmt::Display for Reason {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "{}", self.description())
+    }
+}
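+
+// A minimal sanity-check sketch (illustrative only, not part of the upstream
+// crate) exercising the conversions and `Display` output defined above.
+#[cfg(test)]
+mod illustrative_tests {
+    use super::*;
+
+    #[test]
+    fn reason_conversions() {
+        assert_eq!(Reason::from(1), Reason::PROTOCOL_ERROR);
+        assert_eq!(u32::from(Reason::CANCEL), 8);
+        assert_eq!(Reason::NO_ERROR.to_string(), "not a result of an error");
+    }
+}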
diff --git a/src/frame/reset.rs b/src/frame/reset.rs
new file mode 100644
index 0000000..39f6ac2
--- /dev/null
+++ b/src/frame/reset.rs
@@ -0,0 +1,56 @@
+use crate::frame::{self, Error, Head, Kind, Reason, StreamId};
+
+use bytes::BufMut;
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct Reset {
+    stream_id: StreamId,
+    error_code: Reason,
+}
+
+impl Reset {
+    pub fn new(stream_id: StreamId, error: Reason) -> Reset {
+        Reset {
+            stream_id,
+            error_code: error,
+        }
+    }
+
+    pub fn stream_id(&self) -> StreamId {
+        self.stream_id
+    }
+
+    pub fn reason(&self) -> Reason {
+        self.error_code
+    }
+
+    pub fn load(head: Head, payload: &[u8]) -> Result<Reset, Error> {
+        if payload.len() != 4 {
+            return Err(Error::InvalidPayloadLength);
+        }
+
+        let error_code = unpack_octets_4!(payload, 0, u32);
+
+        Ok(Reset {
+            stream_id: head.stream_id(),
+            error_code: error_code.into(),
+        })
+    }
+
+    pub fn encode<B: BufMut>(&self, dst: &mut B) {
+        tracing::trace!(
+            "encoding RESET; id={:?} code={:?}",
+            self.stream_id,
+            self.error_code
+        );
+        let head = Head::new(Kind::Reset, 0, self.stream_id);
+        head.encode(4, dst);
+        dst.put_u32(self.error_code.into());
+    }
+}
+
+impl<B> From<Reset> for frame::Frame<B> {
+    fn from(src: Reset) -> Self {
+        frame::Frame::Reset(src)
+    }
+}
diff --git a/src/frame/settings.rs b/src/frame/settings.rs
new file mode 100644
index 0000000..484498a
--- /dev/null
+++ b/src/frame/settings.rs
@@ -0,0 +1,389 @@
+use std::fmt;
+
+use crate::frame::{util, Error, Frame, FrameSize, Head, Kind, StreamId};
+use bytes::{BufMut, BytesMut};
+
+#[derive(Clone, Default, Eq, PartialEq)]
+pub struct Settings {
+    flags: SettingsFlags,
+    // Fields
+    header_table_size: Option<u32>,
+    enable_push: Option<u32>,
+    max_concurrent_streams: Option<u32>,
+    initial_window_size: Option<u32>,
+    max_frame_size: Option<u32>,
+    max_header_list_size: Option<u32>,
+    enable_connect_protocol: Option<u32>,
+}
+
+/// An enum that lists all valid settings that can be sent in a SETTINGS
+/// frame.
+///
+/// Each setting has a value that is a 32-bit unsigned integer (Section 6.5.1).
+#[derive(Debug)]
+pub enum Setting {
+    HeaderTableSize(u32),
+    EnablePush(u32),
+    MaxConcurrentStreams(u32),
+    InitialWindowSize(u32),
+    MaxFrameSize(u32),
+    MaxHeaderListSize(u32),
+    EnableConnectProtocol(u32),
+}
+
+#[derive(Copy, Clone, Eq, PartialEq, Default)]
+pub struct SettingsFlags(u8);
+
+const ACK: u8 = 0x1;
+const ALL: u8 = ACK;
+
+/// The default value of SETTINGS_HEADER_TABLE_SIZE
+pub const DEFAULT_SETTINGS_HEADER_TABLE_SIZE: usize = 4_096;
+
+/// The default value of SETTINGS_INITIAL_WINDOW_SIZE
+pub const DEFAULT_INITIAL_WINDOW_SIZE: u32 = 65_535;
+
+/// The default value of MAX_FRAME_SIZE
+pub const DEFAULT_MAX_FRAME_SIZE: FrameSize = 16_384;
+
+/// INITIAL_WINDOW_SIZE upper bound
+pub const MAX_INITIAL_WINDOW_SIZE: usize = (1 << 31) - 1;
+
+/// MAX_FRAME_SIZE upper bound
+pub const MAX_MAX_FRAME_SIZE: FrameSize = (1 << 24) - 1;
+
+// ===== impl Settings =====
+
+impl Settings {
+    pub fn ack() -> Settings {
+        Settings {
+            flags: SettingsFlags::ack(),
+            ..Settings::default()
+        }
+    }
+
+    pub fn is_ack(&self) -> bool {
+        self.flags.is_ack()
+    }
+
+    pub fn initial_window_size(&self) -> Option<u32> {
+        self.initial_window_size
+    }
+
+    pub fn set_initial_window_size(&mut self, size: Option<u32>) {
+        self.initial_window_size = size;
+    }
+
+    pub fn max_concurrent_streams(&self) -> Option<u32> {
+        self.max_concurrent_streams
+    }
+
+    pub fn set_max_concurrent_streams(&mut self, max: Option<u32>) {
+        self.max_concurrent_streams = max;
+    }
+
+    pub fn max_frame_size(&self) -> Option<u32> {
+        self.max_frame_size
+    }
+
+    pub fn set_max_frame_size(&mut self, size: Option<u32>) {
+        if let Some(val) = size {
+            assert!(DEFAULT_MAX_FRAME_SIZE <= val && val <= MAX_MAX_FRAME_SIZE);
+        }
+        self.max_frame_size = size;
+    }
+
+    pub fn max_header_list_size(&self) -> Option<u32> {
+        self.max_header_list_size
+    }
+
+    pub fn set_max_header_list_size(&mut self, size: Option<u32>) {
+        self.max_header_list_size = size;
+    }
+
+    pub fn is_push_enabled(&self) -> Option<bool> {
+        self.enable_push.map(|val| val != 0)
+    }
+
+    pub fn set_enable_push(&mut self, enable: bool) {
+        self.enable_push = Some(enable as u32);
+    }
+
+    pub fn is_extended_connect_protocol_enabled(&self) -> Option<bool> {
+        self.enable_connect_protocol.map(|val| val != 0)
+    }
+
+    pub fn set_enable_connect_protocol(&mut self, val: Option<u32>) {
+        self.enable_connect_protocol = val;
+    }
+
+    pub fn header_table_size(&self) -> Option<u32> {
+        self.header_table_size
+    }
+
+    pub fn set_header_table_size(&mut self, size: Option<u32>) {
+        self.header_table_size = size;
+    }
+
+    pub fn load(head: Head, payload: &[u8]) -> Result<Settings, Error> {
+        use self::Setting::*;
+
+        debug_assert_eq!(head.kind(), crate::frame::Kind::Settings);
+
+        if !head.stream_id().is_zero() {
+            return Err(Error::InvalidStreamId);
+        }
+
+        // Load the flag
+        let flag = SettingsFlags::load(head.flag());
+
+        if flag.is_ack() {
+            // Ensure that the payload is empty
+            if !payload.is_empty() {
+                return Err(Error::InvalidPayloadLength);
+            }
+
+            // Return the ACK frame
+            return Ok(Settings::ack());
+        }
+
+        // Ensure the payload length is correct; each setting is 6 bytes long.
+        if payload.len() % 6 != 0 {
+            tracing::debug!("invalid settings payload length; len={:?}", payload.len());
+            return Err(Error::InvalidPayloadAckSettings);
+        }
+
+        let mut settings = Settings::default();
+        debug_assert!(!settings.flags.is_ack());
+
+        for raw in payload.chunks(6) {
+            match Setting::load(raw) {
+                Some(HeaderTableSize(val)) => {
+                    settings.header_table_size = Some(val);
+                }
+                Some(EnablePush(val)) => match val {
+                    0 | 1 => {
+                        settings.enable_push = Some(val);
+                    }
+                    _ => {
+                        return Err(Error::InvalidSettingValue);
+                    }
+                },
+                Some(MaxConcurrentStreams(val)) => {
+                    settings.max_concurrent_streams = Some(val);
+                }
+                Some(InitialWindowSize(val)) => {
+                    if val as usize > MAX_INITIAL_WINDOW_SIZE {
+                        return Err(Error::InvalidSettingValue);
+                    } else {
+                        settings.initial_window_size = Some(val);
+                    }
+                }
+                Some(MaxFrameSize(val)) => {
+                    if DEFAULT_MAX_FRAME_SIZE <= val && val <= MAX_MAX_FRAME_SIZE {
+                        settings.max_frame_size = Some(val);
+                    } else {
+                        return Err(Error::InvalidSettingValue);
+                    }
+                }
+                Some(MaxHeaderListSize(val)) => {
+                    settings.max_header_list_size = Some(val);
+                }
+                Some(EnableConnectProtocol(val)) => match val {
+                    0 | 1 => {
+                        settings.enable_connect_protocol = Some(val);
+                    }
+                    _ => {
+                        return Err(Error::InvalidSettingValue);
+                    }
+                },
+                None => {}
+            }
+        }
+
+        Ok(settings)
+    }
+
+    fn payload_len(&self) -> usize {
+        let mut len = 0;
+        self.for_each(|_| len += 6);
+        len
+    }
+
+    pub fn encode(&self, dst: &mut BytesMut) {
+        // Create & encode an appropriate frame head
+        let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero());
+        let payload_len = self.payload_len();
+
+        tracing::trace!("encoding SETTINGS; len={}", payload_len);
+
+        head.encode(payload_len, dst);
+
+        // Encode the settings
+        self.for_each(|setting| {
+            tracing::trace!("encoding setting; val={:?}", setting);
+            setting.encode(dst)
+        });
+    }
+
+    fn for_each<F: FnMut(Setting)>(&self, mut f: F) {
+        use self::Setting::*;
+
+        if let Some(v) = self.header_table_size {
+            f(HeaderTableSize(v));
+        }
+
+        if let Some(v) = self.enable_push {
+            f(EnablePush(v));
+        }
+
+        if let Some(v) = self.max_concurrent_streams {
+            f(MaxConcurrentStreams(v));
+        }
+
+        if let Some(v) = self.initial_window_size {
+            f(InitialWindowSize(v));
+        }
+
+        if let Some(v) = self.max_frame_size {
+            f(MaxFrameSize(v));
+        }
+
+        if let Some(v) = self.max_header_list_size {
+            f(MaxHeaderListSize(v));
+        }
+
+        if let Some(v) = self.enable_connect_protocol {
+            f(EnableConnectProtocol(v));
+        }
+    }
+}
+
+impl<T> From<Settings> for Frame<T> {
+    fn from(src: Settings) -> Frame<T> {
+        Frame::Settings(src)
+    }
+}
+
+impl fmt::Debug for Settings {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut builder = f.debug_struct("Settings");
+        builder.field("flags", &self.flags);
+
+        self.for_each(|setting| match setting {
+            Setting::EnablePush(v) => {
+                builder.field("enable_push", &v);
+            }
+            Setting::HeaderTableSize(v) => {
+                builder.field("header_table_size", &v);
+            }
+            Setting::InitialWindowSize(v) => {
+                builder.field("initial_window_size", &v);
+            }
+            Setting::MaxConcurrentStreams(v) => {
+                builder.field("max_concurrent_streams", &v);
+            }
+            Setting::MaxFrameSize(v) => {
+                builder.field("max_frame_size", &v);
+            }
+            Setting::MaxHeaderListSize(v) => {
+                builder.field("max_header_list_size", &v);
+            }
+            Setting::EnableConnectProtocol(v) => {
+                builder.field("enable_connect_protocol", &v);
+            }
+        });
+
+        builder.finish()
+    }
+}
+
+// ===== impl Setting =====
+
+impl Setting {
+    /// Creates a new `Setting` with the correct variant corresponding to the
+    /// given setting id, based on the settings IDs defined in section
+    /// 6.5.2.
+    pub fn from_id(id: u16, val: u32) -> Option<Setting> {
+        use self::Setting::*;
+
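+        // Identifiers 0x1 through 0x6 are defined in RFC 7540 §6.5.2; 0x8 is
+        // SETTINGS_ENABLE_CONNECT_PROTOCOL from RFC 8441. Unknown identifiers
+        // are ignored, as the specification requires.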
+        match id {
+            1 => Some(HeaderTableSize(val)),
+            2 => Some(EnablePush(val)),
+            3 => Some(MaxConcurrentStreams(val)),
+            4 => Some(InitialWindowSize(val)),
+            5 => Some(MaxFrameSize(val)),
+            6 => Some(MaxHeaderListSize(val)),
+            8 => Some(EnableConnectProtocol(val)),
+            _ => None,
+        }
+    }
+
+    /// Creates a new `Setting` by parsing the given buffer of 6 bytes, which
+    /// contains the raw byte representation of the setting, according to the
+    /// "SETTINGS format" defined in section 6.5.1.
+    ///
+    /// The `raw` parameter should have length at least 6 bytes, since the
+    /// length of the raw setting is exactly 6 bytes.
+    ///
+    /// # Panics
+    ///
+    /// If given a buffer shorter than 6 bytes, the function will panic.
+    fn load(raw: &[u8]) -> Option<Setting> {
+        let id: u16 = (u16::from(raw[0]) << 8) | u16::from(raw[1]);
+        let val: u32 = unpack_octets_4!(raw, 2, u32);
+
+        Setting::from_id(id, val)
+    }
+
+    fn encode(&self, dst: &mut BytesMut) {
+        use self::Setting::*;
+
+        let (kind, val) = match *self {
+            HeaderTableSize(v) => (1, v),
+            EnablePush(v) => (2, v),
+            MaxConcurrentStreams(v) => (3, v),
+            InitialWindowSize(v) => (4, v),
+            MaxFrameSize(v) => (5, v),
+            MaxHeaderListSize(v) => (6, v),
+            EnableConnectProtocol(v) => (8, v),
+        };
+
+        dst.put_u16(kind);
+        dst.put_u32(val);
+    }
+}
+
+// ===== impl SettingsFlags =====
+
+impl SettingsFlags {
+    pub fn empty() -> SettingsFlags {
+        SettingsFlags(0)
+    }
+
+    pub fn load(bits: u8) -> SettingsFlags {
+        SettingsFlags(bits & ALL)
+    }
+
+    pub fn ack() -> SettingsFlags {
+        SettingsFlags(ACK)
+    }
+
+    pub fn is_ack(&self) -> bool {
+        self.0 & ACK == ACK
+    }
+}
+
+impl From<SettingsFlags> for u8 {
+    fn from(src: SettingsFlags) -> u8 {
+        src.0
+    }
+}
+
+impl fmt::Debug for SettingsFlags {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        util::debug_flags(f, self.0)
+            .flag_if(self.is_ack(), "ACK")
+            .finish()
+    }
+}
diff --git a/src/frame/stream_id.rs b/src/frame/stream_id.rs
new file mode 100644
index 0000000..10a14d3
--- /dev/null
+++ b/src/frame/stream_id.rs
@@ -0,0 +1,96 @@
+use std::u32;
+
+/// A stream identifier, as described in [Section 5.1.1] of RFC 7540.
+///
+/// Streams are identified with an unsigned 31-bit integer. Streams
+/// initiated by a client MUST use odd-numbered stream identifiers; those
+/// initiated by the server MUST use even-numbered stream identifiers.  A
+/// stream identifier of zero (0x0) is used for connection control
+/// messages; the stream identifier of zero cannot be used to establish a
+/// new stream.
+///
+/// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct StreamId(u32);
+
+#[derive(Debug, Copy, Clone)]
+pub struct StreamIdOverflow;
+
+const STREAM_ID_MASK: u32 = 1 << 31;
+
+impl StreamId {
+    /// Stream ID 0.
+    pub const ZERO: StreamId = StreamId(0);
+
+    /// The maximum allowed stream ID.
+    pub const MAX: StreamId = StreamId(u32::MAX >> 1);
+
+    /// Parse the stream ID
+    #[inline]
+    pub fn parse(buf: &[u8]) -> (StreamId, bool) {
+        let mut ubuf = [0; 4];
+        ubuf.copy_from_slice(&buf[0..4]);
+        let unpacked = u32::from_be_bytes(ubuf);
+        let flag = unpacked & STREAM_ID_MASK == STREAM_ID_MASK;
+
+        // Now clear the most significant bit, as that is reserved and MUST be
+        // ignored when received.
+        (StreamId(unpacked & !STREAM_ID_MASK), flag)
+    }
+
+    /// Returns true if this stream ID corresponds to a stream that
+    /// was initiated by the client.
+    pub fn is_client_initiated(&self) -> bool {
+        let id = self.0;
+        id != 0 && id % 2 == 1
+    }
+
+    /// Returns true if this stream ID corresponds to a stream that
+    /// was initiated by the server.
+    pub fn is_server_initiated(&self) -> bool {
+        let id = self.0;
+        id != 0 && id % 2 == 0
+    }
+
+    /// Return a new `StreamId` for stream 0.
+    #[inline]
+    pub fn zero() -> StreamId {
+        StreamId::ZERO
+    }
+
+    /// Returns true if this stream ID is zero.
+    pub fn is_zero(&self) -> bool {
+        self.0 == 0
+    }
+
+    /// Returns the next stream ID initiated by the same peer as this stream
+    /// ID, or an error if incrementing this stream ID would overflow the
+    /// maximum.
+    pub fn next_id(&self) -> Result<StreamId, StreamIdOverflow> {
+        let next = self.0 + 2;
+        if next > StreamId::MAX.0 {
+            Err(StreamIdOverflow)
+        } else {
+            Ok(StreamId(next))
+        }
+    }
+}
+
+impl From<u32> for StreamId {
+    fn from(src: u32) -> Self {
+        assert_eq!(src & STREAM_ID_MASK, 0, "invalid stream ID -- MSB is set");
+        StreamId(src)
+    }
+}
+
+impl From<StreamId> for u32 {
+    fn from(src: StreamId) -> Self {
+        src.0
+    }
+}
+
+impl PartialEq<u32> for StreamId {
+    fn eq(&self, other: &u32) -> bool {
+        self.0 == *other
+    }
+}
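+
+// A minimal sketch (illustrative only, not part of the upstream crate)
+// showing the parity rules and `next_id` behavior documented above.
+#[cfg(test)]
+mod illustrative_tests {
+    use super::*;
+
+    #[test]
+    fn parity_and_next_id() {
+        let client = StreamId::from(1);
+        let server = StreamId::from(2);
+        assert!(client.is_client_initiated());
+        assert!(server.is_server_initiated());
+        assert!(StreamId::zero().is_zero());
+        assert_eq!(client.next_id().unwrap(), StreamId::from(3));
+    }
+}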
diff --git a/src/frame/util.rs b/src/frame/util.rs
new file mode 100644
index 0000000..6bee7bd
--- /dev/null
+++ b/src/frame/util.rs
@@ -0,0 +1,79 @@
+use std::fmt;
+
+use super::Error;
+use bytes::Bytes;
+
+/// Strip padding from the given payload.
+///
+/// It is assumed that the frame had the padded flag set. This means that the
+/// first byte is the length of the padding with that many
+/// 0 bytes expected to follow the actual payload.
+///
+/// # Returns
+///
+/// The length of the padding. The payload is adjusted in place so that only
+/// the actual data remains: the padding length octet is stripped from the
+/// front and the padding bytes are stripped from the end.
+///
+/// If the padded payload is invalid (e.g. the padding length is greater than
+/// or equal to the total length), `Err(Error::TooMuchPadding)` is returned.
+pub fn strip_padding(payload: &mut Bytes) -> Result<u8, Error> {
+    let payload_len = payload.len();
+    if payload_len == 0 {
+        // If this is the case, the frame is invalid as no padding length can be
+        // extracted, even though the frame should be padded.
+        return Err(Error::TooMuchPadding);
+    }
+
+    let pad_len = payload[0] as usize;
+
+    if pad_len >= payload_len {
+        // This is invalid: the padding length MUST be less than the
+        // total frame size.
+        return Err(Error::TooMuchPadding);
+    }
+
+    let _ = payload.split_to(1);
+    let _ = payload.split_off(payload_len - pad_len - 1);
+
+    Ok(pad_len as u8)
+}
+
+pub(super) fn debug_flags<'a, 'f: 'a>(
+    fmt: &'a mut fmt::Formatter<'f>,
+    bits: u8,
+) -> DebugFlags<'a, 'f> {
+    let result = write!(fmt, "({:#x}", bits);
+    DebugFlags {
+        fmt,
+        result,
+        started: false,
+    }
+}
+
+pub(super) struct DebugFlags<'a, 'f: 'a> {
+    fmt: &'a mut fmt::Formatter<'f>,
+    result: fmt::Result,
+    started: bool,
+}
+
+impl<'a, 'f: 'a> DebugFlags<'a, 'f> {
+    pub(super) fn flag_if(&mut self, enabled: bool, name: &str) -> &mut Self {
+        if enabled {
+            self.result = self.result.and_then(|()| {
+                let prefix = if self.started {
+                    " | "
+                } else {
+                    self.started = true;
+                    ": "
+                };
+
+                write!(self.fmt, "{}{}", prefix, name)
+            });
+        }
+        self
+    }
+
+    pub(super) fn finish(&mut self) -> fmt::Result {
+        self.result.and_then(|()| write!(self.fmt, ")"))
+    }
+}
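+
+// A minimal sketch (illustrative only, not part of the upstream crate)
+// showing how `strip_padding` rewrites the payload in place.
+#[cfg(test)]
+mod illustrative_tests {
+    use super::*;
+
+    #[test]
+    fn strips_pad_length_and_trailing_padding() {
+        // One pad-length octet (2), two data bytes, two padding bytes.
+        let mut payload = Bytes::from_static(&[2, 0xAA, 0xBB, 0, 0]);
+        assert_eq!(strip_padding(&mut payload).unwrap(), 2);
+        assert_eq!(&payload[..], &[0xAA, 0xBB][..]);
+    }
+}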
diff --git a/src/frame/window_update.rs b/src/frame/window_update.rs
new file mode 100644
index 0000000..eed2ce1
--- /dev/null
+++ b/src/frame/window_update.rs
@@ -0,0 +1,62 @@
+use crate::frame::{self, Error, Head, Kind, StreamId};
+
+use bytes::BufMut;
+
+const SIZE_INCREMENT_MASK: u32 = 1 << 31;
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub struct WindowUpdate {
+    stream_id: StreamId,
+    size_increment: u32,
+}
+
+impl WindowUpdate {
+    pub fn new(stream_id: StreamId, size_increment: u32) -> WindowUpdate {
+        WindowUpdate {
+            stream_id,
+            size_increment,
+        }
+    }
+
+    pub fn stream_id(&self) -> StreamId {
+        self.stream_id
+    }
+
+    pub fn size_increment(&self) -> u32 {
+        self.size_increment
+    }
+
+    /// Builds a `WindowUpdate` frame from a raw frame.
+    pub fn load(head: Head, payload: &[u8]) -> Result<WindowUpdate, Error> {
+        debug_assert_eq!(head.kind(), crate::frame::Kind::WindowUpdate);
+        if payload.len() != 4 {
+            return Err(Error::BadFrameSize);
+        }
+
+        // Clear the most significant bit, as that is reserved and MUST be ignored
+        // when received.
+        let size_increment = unpack_octets_4!(payload, 0, u32) & !SIZE_INCREMENT_MASK;
+
+        if size_increment == 0 {
+            return Err(Error::InvalidWindowUpdateValue);
+        }
+
+        Ok(WindowUpdate {
+            stream_id: head.stream_id(),
+            size_increment,
+        })
+    }
+
+    pub fn encode<B: BufMut>(&self, dst: &mut B) {
+        tracing::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id);
+        let head = Head::new(Kind::WindowUpdate, 0, self.stream_id);
+        head.encode(4, dst);
+        dst.put_u32(self.size_increment);
+    }
+}
+
+impl<B> From<WindowUpdate> for frame::Frame<B> {
+    fn from(src: WindowUpdate) -> Self {
+        frame::Frame::WindowUpdate(src)
+    }
+}
diff --git a/src/fuzz_bridge.rs b/src/fuzz_bridge.rs
new file mode 100644
index 0000000..3ea8b59
--- /dev/null
+++ b/src/fuzz_bridge.rs
@@ -0,0 +1,28 @@
+#[cfg(fuzzing)]
+pub mod fuzz_logic {
+    use crate::hpack;
+    use bytes::BytesMut;
+    use http::header::HeaderName;
+    use std::io::Cursor;
+
+    pub fn fuzz_hpack(data_: &[u8]) {
+        let mut decoder_ = hpack::Decoder::new(0);
+        let mut buf = BytesMut::new();
+        buf.extend(data_);
+        let _dec_res = decoder_.decode(&mut Cursor::new(&mut buf), |_h| {});
+
+        if let Ok(s) = std::str::from_utf8(data_) {
+            if let Ok(h) = http::Method::from_bytes(s.as_bytes()) {
+                let m_ = hpack::Header::Method(h);
+                let mut encoder = hpack::Encoder::new(0, 0);
+                let _res = encode(&mut encoder, vec![m_]);
+            }
+        }
+    }
+
+    fn encode(e: &mut hpack::Encoder, hdrs: Vec<hpack::Header<Option<HeaderName>>>) -> BytesMut {
+        let mut dst = BytesMut::with_capacity(1024);
+        e.encode(&mut hdrs.into_iter(), &mut dst);
+        dst
+    }
+}
diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs
new file mode 100644
index 0000000..e48976c
--- /dev/null
+++ b/src/hpack/decoder.rs
@@ -0,0 +1,936 @@
+use super::{header::BytesStr, huffman, Header};
+use crate::frame;
+
+use bytes::{Buf, Bytes, BytesMut};
+use http::header;
+use http::method::{self, Method};
+use http::status::{self, StatusCode};
+
+use std::cmp;
+use std::collections::VecDeque;
+use std::io::Cursor;
+use std::str::Utf8Error;
+
+/// Decodes headers using HPACK
+#[derive(Debug)]
+pub struct Decoder {
+    // Protocol indicated that the max table size will update
+    max_size_update: Option<usize>,
+    last_max_update: usize,
+    table: Table,
+    buffer: BytesMut,
+}
+
+/// Represents all errors that can be encountered while performing the decoding
+/// of an HPACK header set.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum DecoderError {
+    InvalidRepresentation,
+    InvalidIntegerPrefix,
+    InvalidTableIndex,
+    InvalidHuffmanCode,
+    InvalidUtf8,
+    InvalidStatusCode,
+    InvalidPseudoheader,
+    InvalidMaxDynamicSize,
+    IntegerOverflow,
+    NeedMore(NeedMore),
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum NeedMore {
+    UnexpectedEndOfStream,
+    IntegerUnderflow,
+    StringUnderflow,
+}
+
+enum Representation {
+    /// Indexed header field representation
+    ///
+    /// An indexed header field representation identifies an entry in either the
+    /// static table or the dynamic table (see Section 2.3).
+    ///
+    /// # Header encoding
+    ///
+    /// ```text
+    ///   0   1   2   3   4   5   6   7
+    /// +---+---+---+---+---+---+---+---+
+    /// | 1 |        Index (7+)         |
+    /// +---+---------------------------+
+    /// ```
+    Indexed,
+
+    /// Literal Header Field with Incremental Indexing
+    ///
+    /// A literal header field with incremental indexing representation results
+    /// in appending a header field to the decoded header list and inserting it
+    /// as a new entry into the dynamic table.
+    ///
+    /// # Header encoding
+    ///
+    /// ```text
+    ///   0   1   2   3   4   5   6   7
+    /// +---+---+---+---+---+---+---+---+
+    /// | 0 | 1 |      Index (6+)       |
+    /// +---+---+-----------------------+
+    /// | H |     Value Length (7+)     |
+    /// +---+---------------------------+
+    /// | Value String (Length octets)  |
+    /// +-------------------------------+
+    /// ```
+    LiteralWithIndexing,
+
+    /// Literal Header Field without Indexing
+    ///
+    /// A literal header field without indexing representation results in
+    /// appending a header field to the decoded header list without altering the
+    /// dynamic table.
+    ///
+    /// # Header encoding
+    ///
+    /// ```text
+    ///   0   1   2   3   4   5   6   7
+    /// +---+---+---+---+---+---+---+---+
+    /// | 0 | 0 | 0 | 0 |  Index (4+)   |
+    /// +---+---+-----------------------+
+    /// | H |     Value Length (7+)     |
+    /// +---+---------------------------+
+    /// | Value String (Length octets)  |
+    /// +-------------------------------+
+    /// ```
+    LiteralWithoutIndexing,
+
+    /// Literal Header Field Never Indexed
+    ///
+    /// A literal header field never-indexed representation results in appending
+    /// a header field to the decoded header list without altering the dynamic
+    /// table. Intermediaries MUST use the same representation for encoding this
+    /// header field.
+    ///
+    /// ```text
+    ///   0   1   2   3   4   5   6   7
+    /// +---+---+---+---+---+---+---+---+
+    /// | 0 | 0 | 0 | 1 |  Index (4+)   |
+    /// +---+---+-----------------------+
+    /// | H |     Value Length (7+)     |
+    /// +---+---------------------------+
+    /// | Value String (Length octets)  |
+    /// +-------------------------------+
+    /// ```
+    LiteralNeverIndexed,
+
+    /// Dynamic Table Size Update
+    ///
+    /// A dynamic table size update signals a change to the size of the dynamic
+    /// table.
+    ///
+    /// # Header encoding
+    ///
+    /// ```text
+    ///   0   1   2   3   4   5   6   7
+    /// +---+---+---+---+---+---+---+---+
+    /// | 0 | 0 | 1 |   Max size (5+)   |
+    /// +---+---------------------------+
+    /// ```
+    SizeUpdate,
+}
+
+#[derive(Debug)]
+struct Table {
+    entries: VecDeque<Header>,
+    size: usize,
+    max_size: usize,
+}
+
+struct StringMarker {
+    offset: usize,
+    len: usize,
+    string: Option<Bytes>,
+}
+
+// ===== impl Decoder =====
+
+impl Decoder {
+    /// Creates a new `Decoder` with all settings set to default values.
+    pub fn new(size: usize) -> Decoder {
+        Decoder {
+            max_size_update: None,
+            last_max_update: size,
+            table: Table::new(size),
+            buffer: BytesMut::with_capacity(4096),
+        }
+    }
+
+    /// Queues a potential size update
+    #[allow(dead_code)]
+    pub fn queue_size_update(&mut self, size: usize) {
+        let size = match self.max_size_update {
+            Some(v) => cmp::max(v, size),
+            None => size,
+        };
+
+        self.max_size_update = Some(size);
+    }
+
+    /// Decodes the headers found in the given buffer.
+    pub fn decode<F>(
+        &mut self,
+        src: &mut Cursor<&mut BytesMut>,
+        mut f: F,
+    ) -> Result<(), DecoderError>
+    where
+        F: FnMut(Header),
+    {
+        use self::Representation::*;
+
+        let mut can_resize = true;
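+        // A dynamic table size update is only permitted at the start of a
+        // header block (RFC 7541 §4.2); `can_resize` is cleared as soon as any
+        // other representation has been decoded.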
+
+        if let Some(size) = self.max_size_update.take() {
+            self.last_max_update = size;
+        }
+
+        let span = tracing::trace_span!("hpack::decode");
+        let _e = span.enter();
+
+        tracing::trace!("decode");
+
+        while let Some(ty) = peek_u8(src) {
+            // At this point we are always at the beginning of the next block
+            // within the HPACK data. The type of the block can always be
+            // determined from the first byte.
+            match Representation::load(ty)? {
+                Indexed => {
+                    tracing::trace!(rem = src.remaining(), kind = %"Indexed");
+                    can_resize = false;
+                    let entry = self.decode_indexed(src)?;
+                    consume(src);
+                    f(entry);
+                }
+                LiteralWithIndexing => {
+                    tracing::trace!(rem = src.remaining(), kind = %"LiteralWithIndexing");
+                    can_resize = false;
+                    let entry = self.decode_literal(src, true)?;
+
+                    // Insert the header into the table
+                    self.table.insert(entry.clone());
+                    consume(src);
+
+                    f(entry);
+                }
+                LiteralWithoutIndexing => {
+                    tracing::trace!(rem = src.remaining(), kind = %"LiteralWithoutIndexing");
+                    can_resize = false;
+                    let entry = self.decode_literal(src, false)?;
+                    consume(src);
+                    f(entry);
+                }
+                LiteralNeverIndexed => {
+                    tracing::trace!(rem = src.remaining(), kind = %"LiteralNeverIndexed");
+                    can_resize = false;
+                    let entry = self.decode_literal(src, false)?;
+                    consume(src);
+
+                    // TODO: Track that this should never be indexed
+
+                    f(entry);
+                }
+                SizeUpdate => {
+                    tracing::trace!(rem = src.remaining(), kind = %"SizeUpdate");
+                    if !can_resize {
+                        return Err(DecoderError::InvalidMaxDynamicSize);
+                    }
+
+                    // Handle the dynamic table size update
+                    self.process_size_update(src)?;
+                    consume(src);
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    fn process_size_update(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<(), DecoderError> {
+        let new_size = decode_int(buf, 5)?;
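+        // The new maximum must not exceed the size most recently allowed via
+        // `queue_size_update` (RFC 7541 §4.2).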
+
+        if new_size > self.last_max_update {
+            return Err(DecoderError::InvalidMaxDynamicSize);
+        }
+
+        tracing::debug!(
+            from = self.table.size(),
+            to = new_size,
+            "Decoder changed max table size"
+        );
+
+        self.table.set_max_size(new_size);
+
+        Ok(())
+    }
+
+    fn decode_indexed(&self, buf: &mut Cursor<&mut BytesMut>) -> Result<Header, DecoderError> {
+        let index = decode_int(buf, 7)?;
+        self.table.get(index)
+    }
+
+    fn decode_literal(
+        &mut self,
+        buf: &mut Cursor<&mut BytesMut>,
+        index: bool,
+    ) -> Result<Header, DecoderError> {
+        let prefix = if index { 6 } else { 4 };
+
+        // Extract the table index for the name, or 0 if not indexed
+        let table_idx = decode_int(buf, prefix)?;
+
+        // First, read the header name
+        if table_idx == 0 {
+            let old_pos = buf.position();
+            let name_marker = self.try_decode_string(buf)?;
+            let value_marker = self.try_decode_string(buf)?;
+            buf.set_position(old_pos);
+            // Read the name as a literal
+            let name = name_marker.consume(buf);
+            let value = value_marker.consume(buf);
+            Header::new(name, value)
+        } else {
+            let e = self.table.get(table_idx)?;
+            let value = self.decode_string(buf)?;
+
+            e.name().into_entry(value)
+        }
+    }
+
+    fn try_decode_string(
+        &mut self,
+        buf: &mut Cursor<&mut BytesMut>,
+    ) -> Result<StringMarker, DecoderError> {
+        let old_pos = buf.position();
+        const HUFF_FLAG: u8 = 0b1000_0000;
+
+        // The first bit in the first byte contains the huffman encoded flag.
+        let huff = match peek_u8(buf) {
+            Some(hdr) => (hdr & HUFF_FLAG) == HUFF_FLAG,
+            None => return Err(DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)),
+        };
+
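+        // A string literal is a 1-bit huffman flag and a 7-bit prefixed length,
+        // followed by `len` octets of data. For example, the huffman-coded
+        // string "foo" arrives as [0x82, 0x94, 0xE7]: flag set, length 2, then
+        // the two coded octets.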
+        // Decode the string length using 7 bit prefix
+        let len = decode_int(buf, 7)?;
+
+        if len > buf.remaining() {
+            tracing::trace!(len, remaining = buf.remaining(), "decode_string underflow",);
+            return Err(DecoderError::NeedMore(NeedMore::StringUnderflow));
+        }
+
+        let offset = (buf.position() - old_pos) as usize;
+        if huff {
+            let ret = {
+                let raw = &buf.chunk()[..len];
+                huffman::decode(raw, &mut self.buffer).map(|buf| StringMarker {
+                    offset,
+                    len,
+                    string: Some(BytesMut::freeze(buf)),
+                })
+            };
+
+            buf.advance(len);
+            ret
+        } else {
+            buf.advance(len);
+            Ok(StringMarker {
+                offset,
+                len,
+                string: None,
+            })
+        }
+    }
+
+    fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<Bytes, DecoderError> {
+        let old_pos = buf.position();
+        let marker = self.try_decode_string(buf)?;
+        buf.set_position(old_pos);
+        Ok(marker.consume(buf))
+    }
+}
+
+impl Default for Decoder {
+    fn default() -> Decoder {
+        Decoder::new(4096)
+    }
+}
+
+// ===== impl Representation =====
+
+impl Representation {
+    pub fn load(byte: u8) -> Result<Representation, DecoderError> {
+        const INDEXED: u8 = 0b1000_0000;
+        const LITERAL_WITH_INDEXING: u8 = 0b0100_0000;
+        const LITERAL_WITHOUT_INDEXING: u8 = 0b1111_0000;
+        const LITERAL_NEVER_INDEXED: u8 = 0b0001_0000;
+        const SIZE_UPDATE_MASK: u8 = 0b1110_0000;
+        const SIZE_UPDATE: u8 = 0b0010_0000;
+
+        // The representation is identified by the pattern of the most
+        // significant bits (RFC 7541, Section 6):
+        //
+        //   1xxxxxxx  Indexed Header Field
+        //   01xxxxxx  Literal Header Field with Incremental Indexing
+        //   001xxxxx  Dynamic Table Size Update
+        //   0001xxxx  Literal Header Field Never Indexed
+        //   0000xxxx  Literal Header Field without Indexing
+
+        if byte & INDEXED == INDEXED {
+            Ok(Representation::Indexed)
+        } else if byte & LITERAL_WITH_INDEXING == LITERAL_WITH_INDEXING {
+            Ok(Representation::LiteralWithIndexing)
+        } else if byte & LITERAL_WITHOUT_INDEXING == 0 {
+            Ok(Representation::LiteralWithoutIndexing)
+        } else if byte & LITERAL_WITHOUT_INDEXING == LITERAL_NEVER_INDEXED {
+            Ok(Representation::LiteralNeverIndexed)
+        } else if byte & SIZE_UPDATE_MASK == SIZE_UPDATE {
+            Ok(Representation::SizeUpdate)
+        } else {
+            Err(DecoderError::InvalidRepresentation)
+        }
+    }
+}
+
+fn decode_int<B: Buf>(buf: &mut B, prefix_size: u8) -> Result<usize, DecoderError> {
+    // The octet limit is chosen such that the maximum allowed *value* can
+    // never overflow an unsigned 32-bit integer. The maximum value of any
+    // integer that can be encoded with 5 octets is ~2^28
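+    //
+    // For example, decoding 1337 with a 5-bit prefix (RFC 7541, C.1.2) reads a
+    // saturated prefix of 31, then 0x9A contributing 26 and 0x0A contributing
+    // 10 << 7 = 1280, for a total of 31 + 26 + 1280 = 1337.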
+    const MAX_BYTES: usize = 5;
+    const VARINT_MASK: u8 = 0b0111_1111;
+    const VARINT_FLAG: u8 = 0b1000_0000;
+
+    if prefix_size < 1 || prefix_size > 8 {
+        return Err(DecoderError::InvalidIntegerPrefix);
+    }
+
+    if !buf.has_remaining() {
+        return Err(DecoderError::NeedMore(NeedMore::IntegerUnderflow));
+    }
+
+    let mask = if prefix_size == 8 {
+        0xFF
+    } else {
+        (1u8 << prefix_size).wrapping_sub(1)
+    };
+
+    let mut ret = (buf.get_u8() & mask) as usize;
+
+    if ret < mask as usize {
+        // Value fits in the prefix bits
+        return Ok(ret);
+    }
+
+    // The int did not fit in the prefix bits, so continue reading.
+    //
+    // The total number of bytes used to represent the int. The first byte was
+    // the prefix, so start at 1.
+    let mut bytes = 1;
+
+    // The rest of the int is stored as a varint -- 7 bits for the value and 1
+    // bit to indicate if it is the last byte.
+    let mut shift = 0;
+
+    while buf.has_remaining() {
+        let b = buf.get_u8();
+
+        bytes += 1;
+        ret += ((b & VARINT_MASK) as usize) << shift;
+        shift += 7;
+
+        if b & VARINT_FLAG == 0 {
+            return Ok(ret);
+        }
+
+        if bytes == MAX_BYTES {
+            // The spec requires that this situation is an error
+            return Err(DecoderError::IntegerOverflow);
+        }
+    }
+
+    Err(DecoderError::NeedMore(NeedMore::IntegerUnderflow))
+}
+
+fn peek_u8<B: Buf>(buf: &B) -> Option<u8> {
+    if buf.has_remaining() {
+        Some(buf.chunk()[0])
+    } else {
+        None
+    }
+}
+
+fn take(buf: &mut Cursor<&mut BytesMut>, n: usize) -> Bytes {
+    let pos = buf.position() as usize;
+    let mut head = buf.get_mut().split_to(pos + n);
+    buf.set_position(0);
+    head.advance(pos);
+    head.freeze()
+}
+
+impl StringMarker {
+    fn consume(self, buf: &mut Cursor<&mut BytesMut>) -> Bytes {
+        buf.advance(self.offset);
+        match self.string {
+            Some(string) => {
+                buf.advance(self.len);
+                string
+            }
+            None => take(buf, self.len),
+        }
+    }
+}
+
+fn consume(buf: &mut Cursor<&mut BytesMut>) {
+    // Remove the bytes decoded so far from the underlying BytesMut. Unlike the
+    // cursor position, this removal is permanent, so it marks the point from
+    // which decoding resumes if a later header turns out to be only partially
+    // received.
+    take(buf, 0);
+}
+
+// ===== impl Table =====
+
+impl Table {
+    fn new(max_size: usize) -> Table {
+        Table {
+            entries: VecDeque::new(),
+            size: 0,
+            max_size,
+        }
+    }
+
+    fn size(&self) -> usize {
+        self.size
+    }
+
+    /// Returns the entry located at the given index.
+    ///
+    /// The table is 1-indexed and constructed in such a way that the first
+    /// entries belong to the static table, followed by entries in the dynamic
+    /// table. They are merged into a single index address space, though.
+    ///
+    /// This is according to the [HPACK spec, section 2.3.3.]
+    /// (http://http2.github.io/http2-spec/compression.html#index.address.space)
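+    ///
+    /// For example, index 2 resolves to `:method: GET` from the static table,
+    /// while index 62 is the most recently inserted dynamic entry.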
+    pub fn get(&self, index: usize) -> Result<Header, DecoderError> {
+        if index == 0 {
+            return Err(DecoderError::InvalidTableIndex);
+        }
+
+        if index <= 61 {
+            return Ok(get_static(index));
+        }
+
+        // Convert the index for lookup in the entries structure.
+        match self.entries.get(index - 62) {
+            Some(e) => Ok(e.clone()),
+            None => Err(DecoderError::InvalidTableIndex),
+        }
+    }
+
+    fn insert(&mut self, entry: Header) {
+        let len = entry.len();
+
+        self.reserve(len);
+
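+        // Per RFC 7541, Section 4.4, an entry larger than the maximum table
+        // size empties the table (done by `reserve` above) and is itself not
+        // inserted, hence the check below.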
+        if self.size + len <= self.max_size {
+            self.size += len;
+
+            // Track the entry
+            self.entries.push_front(entry);
+        }
+    }
+
+    fn set_max_size(&mut self, size: usize) {
+        self.max_size = size;
+        // Make the table size fit within the new constraints.
+        self.consolidate();
+    }
+
+    fn reserve(&mut self, size: usize) {
+        while self.size + size > self.max_size {
+            match self.entries.pop_back() {
+                Some(last) => {
+                    self.size -= last.len();
+                }
+                None => return,
+            }
+        }
+    }
+
+    fn consolidate(&mut self) {
+        while self.size > self.max_size {
+            {
+                let last = match self.entries.back() {
+                    Some(x) => x,
+                    None => {
+                        // Can never happen as the size of the table must reach
+                        // 0 by the time we've exhausted all elements.
+                        panic!("Size of table != 0, but no headers left!");
+                    }
+                };
+
+                self.size -= last.len();
+            }
+
+            self.entries.pop_back();
+        }
+    }
+}
+
+// ===== impl DecoderError =====
+
+impl From<Utf8Error> for DecoderError {
+    fn from(_: Utf8Error) -> DecoderError {
+        // TODO: Better error?
+        DecoderError::InvalidUtf8
+    }
+}
+
+impl From<header::InvalidHeaderValue> for DecoderError {
+    fn from(_: header::InvalidHeaderValue) -> DecoderError {
+        // TODO: Better error?
+        DecoderError::InvalidUtf8
+    }
+}
+
+impl From<header::InvalidHeaderName> for DecoderError {
+    fn from(_: header::InvalidHeaderName) -> DecoderError {
+        // TODO: Better error
+        DecoderError::InvalidUtf8
+    }
+}
+
+impl From<method::InvalidMethod> for DecoderError {
+    fn from(_: method::InvalidMethod) -> DecoderError {
+        // TODO: Better error
+        DecoderError::InvalidUtf8
+    }
+}
+
+impl From<status::InvalidStatusCode> for DecoderError {
+    fn from(_: status::InvalidStatusCode) -> DecoderError {
+        // TODO: Better error
+        DecoderError::InvalidUtf8
+    }
+}
+
+impl From<DecoderError> for frame::Error {
+    fn from(src: DecoderError) -> Self {
+        frame::Error::Hpack(src)
+    }
+}
+
+/// Get an entry from the static table
+pub fn get_static(idx: usize) -> Header {
+    use http::header::HeaderValue;
+
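+    // Indices 1 through 61 mirror the static table defined in RFC 7541,
+    // Appendix A.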
+    match idx {
+        1 => Header::Authority(BytesStr::from_static("")),
+        2 => Header::Method(Method::GET),
+        3 => Header::Method(Method::POST),
+        4 => Header::Path(BytesStr::from_static("/")),
+        5 => Header::Path(BytesStr::from_static("/index.html")),
+        6 => Header::Scheme(BytesStr::from_static("http")),
+        7 => Header::Scheme(BytesStr::from_static("https")),
+        8 => Header::Status(StatusCode::OK),
+        9 => Header::Status(StatusCode::NO_CONTENT),
+        10 => Header::Status(StatusCode::PARTIAL_CONTENT),
+        11 => Header::Status(StatusCode::NOT_MODIFIED),
+        12 => Header::Status(StatusCode::BAD_REQUEST),
+        13 => Header::Status(StatusCode::NOT_FOUND),
+        14 => Header::Status(StatusCode::INTERNAL_SERVER_ERROR),
+        15 => Header::Field {
+            name: header::ACCEPT_CHARSET,
+            value: HeaderValue::from_static(""),
+        },
+        16 => Header::Field {
+            name: header::ACCEPT_ENCODING,
+            value: HeaderValue::from_static("gzip, deflate"),
+        },
+        17 => Header::Field {
+            name: header::ACCEPT_LANGUAGE,
+            value: HeaderValue::from_static(""),
+        },
+        18 => Header::Field {
+            name: header::ACCEPT_RANGES,
+            value: HeaderValue::from_static(""),
+        },
+        19 => Header::Field {
+            name: header::ACCEPT,
+            value: HeaderValue::from_static(""),
+        },
+        20 => Header::Field {
+            name: header::ACCESS_CONTROL_ALLOW_ORIGIN,
+            value: HeaderValue::from_static(""),
+        },
+        21 => Header::Field {
+            name: header::AGE,
+            value: HeaderValue::from_static(""),
+        },
+        22 => Header::Field {
+            name: header::ALLOW,
+            value: HeaderValue::from_static(""),
+        },
+        23 => Header::Field {
+            name: header::AUTHORIZATION,
+            value: HeaderValue::from_static(""),
+        },
+        24 => Header::Field {
+            name: header::CACHE_CONTROL,
+            value: HeaderValue::from_static(""),
+        },
+        25 => Header::Field {
+            name: header::CONTENT_DISPOSITION,
+            value: HeaderValue::from_static(""),
+        },
+        26 => Header::Field {
+            name: header::CONTENT_ENCODING,
+            value: HeaderValue::from_static(""),
+        },
+        27 => Header::Field {
+            name: header::CONTENT_LANGUAGE,
+            value: HeaderValue::from_static(""),
+        },
+        28 => Header::Field {
+            name: header::CONTENT_LENGTH,
+            value: HeaderValue::from_static(""),
+        },
+        29 => Header::Field {
+            name: header::CONTENT_LOCATION,
+            value: HeaderValue::from_static(""),
+        },
+        30 => Header::Field {
+            name: header::CONTENT_RANGE,
+            value: HeaderValue::from_static(""),
+        },
+        31 => Header::Field {
+            name: header::CONTENT_TYPE,
+            value: HeaderValue::from_static(""),
+        },
+        32 => Header::Field {
+            name: header::COOKIE,
+            value: HeaderValue::from_static(""),
+        },
+        33 => Header::Field {
+            name: header::DATE,
+            value: HeaderValue::from_static(""),
+        },
+        34 => Header::Field {
+            name: header::ETAG,
+            value: HeaderValue::from_static(""),
+        },
+        35 => Header::Field {
+            name: header::EXPECT,
+            value: HeaderValue::from_static(""),
+        },
+        36 => Header::Field {
+            name: header::EXPIRES,
+            value: HeaderValue::from_static(""),
+        },
+        37 => Header::Field {
+            name: header::FROM,
+            value: HeaderValue::from_static(""),
+        },
+        38 => Header::Field {
+            name: header::HOST,
+            value: HeaderValue::from_static(""),
+        },
+        39 => Header::Field {
+            name: header::IF_MATCH,
+            value: HeaderValue::from_static(""),
+        },
+        40 => Header::Field {
+            name: header::IF_MODIFIED_SINCE,
+            value: HeaderValue::from_static(""),
+        },
+        41 => Header::Field {
+            name: header::IF_NONE_MATCH,
+            value: HeaderValue::from_static(""),
+        },
+        42 => Header::Field {
+            name: header::IF_RANGE,
+            value: HeaderValue::from_static(""),
+        },
+        43 => Header::Field {
+            name: header::IF_UNMODIFIED_SINCE,
+            value: HeaderValue::from_static(""),
+        },
+        44 => Header::Field {
+            name: header::LAST_MODIFIED,
+            value: HeaderValue::from_static(""),
+        },
+        45 => Header::Field {
+            name: header::LINK,
+            value: HeaderValue::from_static(""),
+        },
+        46 => Header::Field {
+            name: header::LOCATION,
+            value: HeaderValue::from_static(""),
+        },
+        47 => Header::Field {
+            name: header::MAX_FORWARDS,
+            value: HeaderValue::from_static(""),
+        },
+        48 => Header::Field {
+            name: header::PROXY_AUTHENTICATE,
+            value: HeaderValue::from_static(""),
+        },
+        49 => Header::Field {
+            name: header::PROXY_AUTHORIZATION,
+            value: HeaderValue::from_static(""),
+        },
+        50 => Header::Field {
+            name: header::RANGE,
+            value: HeaderValue::from_static(""),
+        },
+        51 => Header::Field {
+            name: header::REFERER,
+            value: HeaderValue::from_static(""),
+        },
+        52 => Header::Field {
+            name: header::REFRESH,
+            value: HeaderValue::from_static(""),
+        },
+        53 => Header::Field {
+            name: header::RETRY_AFTER,
+            value: HeaderValue::from_static(""),
+        },
+        54 => Header::Field {
+            name: header::SERVER,
+            value: HeaderValue::from_static(""),
+        },
+        55 => Header::Field {
+            name: header::SET_COOKIE,
+            value: HeaderValue::from_static(""),
+        },
+        56 => Header::Field {
+            name: header::STRICT_TRANSPORT_SECURITY,
+            value: HeaderValue::from_static(""),
+        },
+        57 => Header::Field {
+            name: header::TRANSFER_ENCODING,
+            value: HeaderValue::from_static(""),
+        },
+        58 => Header::Field {
+            name: header::USER_AGENT,
+            value: HeaderValue::from_static(""),
+        },
+        59 => Header::Field {
+            name: header::VARY,
+            value: HeaderValue::from_static(""),
+        },
+        60 => Header::Field {
+            name: header::VIA,
+            value: HeaderValue::from_static(""),
+        },
+        61 => Header::Field {
+            name: header::WWW_AUTHENTICATE,
+            value: HeaderValue::from_static(""),
+        },
+        _ => unreachable!(),
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn test_peek_u8() {
+        let b = 0xff;
+        let mut buf = Cursor::new(vec![b]);
+        assert_eq!(peek_u8(&buf), Some(b));
+        assert_eq!(buf.get_u8(), b);
+        assert_eq!(peek_u8(&buf), None);
+    }
+
+    #[test]
+    fn test_decode_string_empty() {
+        let mut de = Decoder::new(0);
+        let mut buf = BytesMut::new();
+        let err = de.decode_string(&mut Cursor::new(&mut buf)).unwrap_err();
+        assert_eq!(err, DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream));
+    }
+
+    #[test]
+    fn test_decode_empty() {
+        let mut de = Decoder::new(0);
+        let mut buf = BytesMut::new();
+        let _: () = de.decode(&mut Cursor::new(&mut buf), |_| {}).unwrap();
+    }
+
+    #[test]
+    fn test_decode_indexed_larger_than_table() {
+        let mut de = Decoder::new(0);
+
+        let mut buf = BytesMut::new();
+        buf.extend([0b01000000, 0x80 | 2]);
+        buf.extend(huff_encode(b"foo"));
+        buf.extend([0x80 | 3]);
+        buf.extend(huff_encode(b"bar"));
+
+        let mut res = vec![];
+        de.decode(&mut Cursor::new(&mut buf), |h| {
+            res.push(h);
+        })
+        .unwrap();
+
+        assert_eq!(res.len(), 1);
+        assert_eq!(de.table.size(), 0);
+
+        match res[0] {
+            Header::Field {
+                ref name,
+                ref value,
+            } => {
+                assert_eq!(name, "foo");
+                assert_eq!(value, "bar");
+            }
+            _ => panic!(),
+        }
+    }
+
+    fn huff_encode(src: &[u8]) -> BytesMut {
+        let mut buf = BytesMut::new();
+        huffman::encode(src, &mut buf);
+        buf
+    }
+
+    #[test]
+    fn test_decode_continuation_header_with_non_huff_encoded_name() {
+        let mut de = Decoder::new(0);
+        let value = huff_encode(b"bar");
+        let mut buf = BytesMut::new();
+        // header name is not huffman-encoded
+        buf.extend([0b01000000, 3]);
+        buf.extend(b"foo");
+        // header value is partial
+        buf.extend([0x80 | 3]);
+        buf.extend(&value[0..1]);
+
+        let mut res = vec![];
+        let e = de
+            .decode(&mut Cursor::new(&mut buf), |h| {
+                res.push(h);
+            })
+            .unwrap_err();
+        // decode error because the header value is partial
+        assert_eq!(e, DecoderError::NeedMore(NeedMore::StringUnderflow));
+
+        // extend buf with the remaining header value
+        buf.extend(&value[1..]);
+        de.decode(&mut Cursor::new(&mut buf), |h| {
+            res.push(h);
+        })
+        .unwrap();
+
+        assert_eq!(res.len(), 1);
+        assert_eq!(de.table.size(), 0);
+
+        match res[0] {
+            Header::Field {
+                ref name,
+                ref value,
+            } => {
+                assert_eq!(name, "foo");
+                assert_eq!(value, "bar");
+            }
+            _ => panic!(),
+        }
+    }
+}
diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs
new file mode 100644
index 0000000..bd49056
--- /dev/null
+++ b/src/hpack/encoder.rs
@@ -0,0 +1,720 @@
+use super::table::{Index, Table};
+use super::{huffman, Header};
+
+use bytes::{BufMut, BytesMut};
+use http::header::{HeaderName, HeaderValue};
+
+#[derive(Debug)]
+pub struct Encoder {
+    table: Table,
+    size_update: Option<SizeUpdate>,
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+enum SizeUpdate {
+    One(usize),
+    Two(usize, usize), // min, max
+}
+
+impl Encoder {
+    pub fn new(max_size: usize, capacity: usize) -> Encoder {
+        Encoder {
+            table: Table::new(max_size, capacity),
+            size_update: None,
+        }
+    }
+
+    /// Queues a max size update.
+    ///
+    /// The next call to `encode` will include a dynamic size update frame.
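+    ///
+    /// If the size is lowered and then raised again before the next call to
+    /// `encode`, both the intermediate minimum and the final value are queued,
+    /// since RFC 7541, Section 4.2 requires the smallest intermediate size to
+    /// be signalled so the peer performs the same evictions.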
+    pub fn update_max_size(&mut self, val: usize) {
+        match self.size_update {
+            Some(SizeUpdate::One(old)) => {
+                if val > old {
+                    if old > self.table.max_size() {
+                        self.size_update = Some(SizeUpdate::One(val));
+                    } else {
+                        self.size_update = Some(SizeUpdate::Two(old, val));
+                    }
+                } else {
+                    self.size_update = Some(SizeUpdate::One(val));
+                }
+            }
+            Some(SizeUpdate::Two(min, _)) => {
+                if val < min {
+                    self.size_update = Some(SizeUpdate::One(val));
+                } else {
+                    self.size_update = Some(SizeUpdate::Two(min, val));
+                }
+            }
+            None => {
+                if val != self.table.max_size() {
+                    // Don't bother writing a frame if the value already matches
+                    // the table's max size.
+                    self.size_update = Some(SizeUpdate::One(val));
+                }
+            }
+        }
+    }
+
+    /// Encode a set of headers into the provided buffer
+    pub fn encode<I>(&mut self, headers: I, dst: &mut BytesMut)
+    where
+        I: IntoIterator<Item = Header<Option<HeaderName>>>,
+    {
+        let span = tracing::trace_span!("hpack::encode");
+        let _e = span.enter();
+
+        self.encode_size_updates(dst);
+
+        let mut last_index = None;
+
+        for header in headers {
+            match header.reify() {
+                // The header has an associated name. In which case, try to
+                // index it in the table.
+                Ok(header) => {
+                    let index = self.table.index(header);
+                    self.encode_header(&index, dst);
+
+                    last_index = Some(index);
+                }
+                // The header does not have an associated name. This means that
+                // the name is the same as the previously yielded header. In
+                // which case, we skip table lookup and just use the same index
+                // as the previous entry.
+                Err(value) => {
+                    self.encode_header_without_name(
+                        last_index.as_ref().unwrap_or_else(|| {
+                            panic!("encoding header without name, but no previous index to use for name");
+                        }),
+                        &value,
+                        dst,
+                    );
+                }
+            }
+        }
+    }
+
+    fn encode_size_updates(&mut self, dst: &mut BytesMut) {
+        match self.size_update.take() {
+            Some(SizeUpdate::One(val)) => {
+                self.table.resize(val);
+                encode_size_update(val, dst);
+            }
+            Some(SizeUpdate::Two(min, max)) => {
+                self.table.resize(min);
+                self.table.resize(max);
+                encode_size_update(min, dst);
+                encode_size_update(max, dst);
+            }
+            None => {}
+        }
+    }
+
+    fn encode_header(&mut self, index: &Index, dst: &mut BytesMut) {
+        match *index {
+            Index::Indexed(idx, _) => {
+                encode_int(idx, 7, 0x80, dst);
+            }
+            Index::Name(idx, _) => {
+                let header = self.table.resolve(index);
+
+                encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst);
+            }
+            Index::Inserted(_) => {
+                let header = self.table.resolve(index);
+
+                assert!(!header.is_sensitive());
+
+                dst.put_u8(0b0100_0000);
+
+                encode_str(header.name().as_slice(), dst);
+                encode_str(header.value_slice(), dst);
+            }
+            Index::InsertedValue(idx, _) => {
+                let header = self.table.resolve(index);
+
+                assert!(!header.is_sensitive());
+
+                encode_int(idx, 6, 0b0100_0000, dst);
+                encode_str(header.value_slice(), dst);
+            }
+            Index::NotIndexed(_) => {
+                let header = self.table.resolve(index);
+
+                encode_not_indexed2(
+                    header.name().as_slice(),
+                    header.value_slice(),
+                    header.is_sensitive(),
+                    dst,
+                );
+            }
+        }
+    }
+
+    fn encode_header_without_name(
+        &mut self,
+        last: &Index,
+        value: &HeaderValue,
+        dst: &mut BytesMut,
+    ) {
+        match *last {
+            Index::Indexed(..)
+            | Index::Name(..)
+            | Index::Inserted(..)
+            | Index::InsertedValue(..) => {
+                let idx = self.table.resolve_idx(last);
+
+                encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst);
+            }
+            Index::NotIndexed(_) => {
+                let last = self.table.resolve(last);
+
+                encode_not_indexed2(
+                    last.name().as_slice(),
+                    value.as_ref(),
+                    value.is_sensitive(),
+                    dst,
+                );
+            }
+        }
+    }
+}
+
+impl Default for Encoder {
+    fn default() -> Encoder {
+        Encoder::new(4096, 0)
+    }
+}
+
+fn encode_size_update(val: usize, dst: &mut BytesMut) {
+    encode_int(val, 5, 0b0010_0000, dst)
+}
+
+fn encode_not_indexed(name: usize, value: &[u8], sensitive: bool, dst: &mut BytesMut) {
+    if sensitive {
+        encode_int(name, 4, 0b10000, dst);
+    } else {
+        encode_int(name, 4, 0, dst);
+    }
+
+    encode_str(value, dst);
+}
+
+fn encode_not_indexed2(name: &[u8], value: &[u8], sensitive: bool, dst: &mut BytesMut) {
+    if sensitive {
+        dst.put_u8(0b10000);
+    } else {
+        dst.put_u8(0);
+    }
+
+    encode_str(name, dst);
+    encode_str(value, dst);
+}
+
+fn encode_str(val: &[u8], dst: &mut BytesMut) {
+    if !val.is_empty() {
+        let idx = position(dst);
+
+        // Push a placeholder byte for the length header
+        dst.put_u8(0);
+
+        // Encode with huffman
+        huffman::encode(val, dst);
+
+        let huff_len = position(dst) - (idx + 1);
+
+        if encode_int_one_byte(huff_len, 7) {
+            // Write the string head
+            dst[idx] = 0x80 | huff_len as u8;
+        } else {
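+            // The huffman output above was written assuming a single-byte
+            // length head; a string of 127 or more coded octets needs a
+            // multi-byte head, so encode it into a scratch buffer and shift
+            // the string to make room.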
+            // Write the head to a placeholder
+            const PLACEHOLDER_LEN: usize = 8;
+            let mut buf = [0u8; PLACEHOLDER_LEN];
+
+            let head_len = {
+                let mut head_dst = &mut buf[..];
+                encode_int(huff_len, 7, 0x80, &mut head_dst);
+                PLACEHOLDER_LEN - head_dst.remaining_mut()
+            };
+
+            // This is just done to reserve space in the destination
+            dst.put_slice(&buf[1..head_len]);
+
+            // Shift the encoded string forward to make room for the longer head
+            for i in 0..huff_len {
+                let src_i = idx + 1 + (huff_len - (i + 1));
+                let dst_i = idx + head_len + (huff_len - (i + 1));
+                dst[dst_i] = dst[src_i];
+            }
+
+            // Copy in the head
+            for i in 0..head_len {
+                dst[idx + i] = buf[i];
+            }
+        }
+    } else {
+        // Write an empty string
+        dst.put_u8(0);
+    }
+}
+
+/// Encode an integer into the given destination buffer
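+///
+/// For example, `encode_int(1337, 5, 0b0010_0000, dst)` writes the three
+/// octets [0x3F, 0x9A, 0x0A]: the saturated 5-bit prefix, then 26 and 10 as
+/// continuation octets (RFC 7541, C.1.2).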
+fn encode_int<B: BufMut>(
+    mut value: usize,   // The integer to encode
+    prefix_bits: usize, // The number of bits in the prefix
+    first_byte: u8,     // The base upon which to start encoding the int
+    dst: &mut B,
+) {
+    if encode_int_one_byte(value, prefix_bits) {
+        dst.put_u8(first_byte | value as u8);
+        return;
+    }
+
+    let low = (1 << prefix_bits) - 1;
+
+    value -= low;
+
+    dst.put_u8(first_byte | low as u8);
+
+    while value >= 128 {
+        dst.put_u8(0b1000_0000 | value as u8);
+
+        value >>= 7;
+    }
+
+    dst.put_u8(value as u8);
+}
+
+/// Returns true if the int can be fully encoded in the first byte.
+fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool {
+    value < (1 << prefix_bits) - 1
+}
+
+fn position(buf: &BytesMut) -> usize {
+    buf.len()
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use http::*;
+
+    #[test]
+    fn test_encode_method_get() {
+        let mut encoder = Encoder::default();
+        let res = encode(&mut encoder, vec![method("GET")]);
+        assert_eq!(*res, [0x80 | 2]);
+        assert_eq!(encoder.table.len(), 0);
+    }
+
+    #[test]
+    fn test_encode_method_post() {
+        let mut encoder = Encoder::default();
+        let res = encode(&mut encoder, vec![method("POST")]);
+        assert_eq!(*res, [0x80 | 3]);
+        assert_eq!(encoder.table.len(), 0);
+    }
+
+    #[test]
+    fn test_encode_method_patch() {
+        let mut encoder = Encoder::default();
+        let res = encode(&mut encoder, vec![method("PATCH")]);
+
+        assert_eq!(res[0], 0b01000000 | 2); // Incremental indexing w/ name pulled from table
+        assert_eq!(res[1], 0x80 | 5); // header value w/ huffman coding
+
+        assert_eq!("PATCH", huff_decode(&res[2..7]));
+        assert_eq!(encoder.table.len(), 1);
+
+        let res = encode(&mut encoder, vec![method("PATCH")]);
+
+        assert_eq!(1 << 7 | 62, res[0]);
+        assert_eq!(1, res.len());
+    }
+
+    #[test]
+    fn test_encode_indexed_name_literal_value() {
+        let mut encoder = Encoder::default();
+        let res = encode(&mut encoder, vec![header("content-language", "foo")]);
+
+        assert_eq!(res[0], 0b01000000 | 27); // Indexed name
+        assert_eq!(res[1], 0x80 | 2); // header value w/ huffman coding
+
+        assert_eq!("foo", huff_decode(&res[2..4]));
+
+        // Same name, new value should still use incremental
+        let res = encode(&mut encoder, vec![header("content-language", "bar")]);
+        assert_eq!(res[0], 0b01000000 | 27); // Indexed name
+        assert_eq!(res[1], 0x80 | 3); // header value w/ huffman coding
+        assert_eq!("bar", huff_decode(&res[2..5]));
+    }
+
+    #[test]
+    fn test_repeated_headers_are_indexed() {
+        let mut encoder = Encoder::default();
+        let res = encode(&mut encoder, vec![header("foo", "hello")]);
+
+        assert_eq!(&[0b01000000, 0x80 | 2], &res[0..2]);
+        assert_eq!("foo", huff_decode(&res[2..4]));
+        assert_eq!(0x80 | 4, res[4]);
+        assert_eq!("hello", huff_decode(&res[5..]));
+        assert_eq!(9, res.len());
+
+        assert_eq!(1, encoder.table.len());
+
+        let res = encode(&mut encoder, vec![header("foo", "hello")]);
+        assert_eq!([0x80 | 62], *res);
+
+        assert_eq!(encoder.table.len(), 1);
+    }
+
+    #[test]
+    fn test_evicting_headers() {
+        let mut encoder = Encoder::default();
+
+        // Fill the table
+        for i in 0..64 {
+            let key = format!("x-hello-world-{:02}", i);
+            let res = encode(&mut encoder, vec![header(&key, &key)]);
+
+            assert_eq!(&[0b01000000, 0x80 | 12], &res[0..2]);
+            assert_eq!(key, huff_decode(&res[2..14]));
+            assert_eq!(0x80 | 12, res[14]);
+            assert_eq!(key, huff_decode(&res[15..]));
+            assert_eq!(27, res.len());
+
+            // Make sure the header can be found...
+            let res = encode(&mut encoder, vec![header(&key, &key)]);
+
+            // Only check that it is found
+            assert_eq!(0x80, res[0] & 0x80);
+        }
+
+        assert_eq!(4096, encoder.table.size());
+        assert_eq!(64, encoder.table.len());
+
+        // Find existing headers
+        for i in 0..64 {
+            let key = format!("x-hello-world-{:02}", i);
+            let res = encode(&mut encoder, vec![header(&key, &key)]);
+            assert_eq!(0x80, res[0] & 0x80);
+        }
+
+        // Insert a new header
+        let key = "x-hello-world-64";
+        let res = encode(&mut encoder, vec![header(key, key)]);
+
+        assert_eq!(&[0b01000000, 0x80 | 12], &res[0..2]);
+        assert_eq!(key, huff_decode(&res[2..14]));
+        assert_eq!(0x80 | 12, res[14]);
+        assert_eq!(key, huff_decode(&res[15..]));
+        assert_eq!(27, res.len());
+
+        assert_eq!(64, encoder.table.len());
+
+        // Now try encoding entries that should exist in the table
+        for i in 1..65 {
+            let key = format!("x-hello-world-{:02}", i);
+            let res = encode(&mut encoder, vec![header(&key, &key)]);
+            assert_eq!(0x80 | (61 + (65 - i)), res[0]);
+        }
+    }
+
+    #[test]
+    fn test_large_headers_are_not_indexed() {
+        let mut encoder = Encoder::new(128, 0);
+        let key = "hello-world-hello-world-HELLO-zzz";
+
+        let res = encode(&mut encoder, vec![header(key, key)]);
+
+        assert_eq!(&[0, 0x80 | 25], &res[..2]);
+
+        assert_eq!(0, encoder.table.len());
+        assert_eq!(0, encoder.table.size());
+    }
+
+    #[test]
+    fn test_sensitive_headers_are_never_indexed() {
+        use http::header::HeaderValue;
+
+        let name = "my-password".parse().unwrap();
+        let mut value = HeaderValue::from_bytes(b"12345").unwrap();
+        value.set_sensitive(true);
+
+        let header = Header::Field {
+            name: Some(name),
+            value,
+        };
+
+        // Now, try to encode the sensitive header
+
+        let mut encoder = Encoder::default();
+        let res = encode(&mut encoder, vec![header]);
+
+        assert_eq!(&[0b10000, 0x80 | 8], &res[..2]);
+        assert_eq!("my-password", huff_decode(&res[2..10]));
+        assert_eq!(0x80 | 4, res[10]);
+        assert_eq!("12345", huff_decode(&res[11..]));
+
+        // Now, try to encode a sensitive header w/ a name in the static table
+        let name = "authorization".parse().unwrap();
+        let mut value = HeaderValue::from_bytes(b"12345").unwrap();
+        value.set_sensitive(true);
+
+        let header = Header::Field {
+            name: Some(name),
+            value,
+        };
+
+        let mut encoder = Encoder::default();
+        let res = encode(&mut encoder, vec![header]);
+
+        assert_eq!(&[0b11111, 8], &res[..2]);
+        assert_eq!(0x80 | 4, res[2]);
+        assert_eq!("12345", huff_decode(&res[3..]));
+
+        // Using the name component of a previously indexed header (without
+        // sensitive flag set)
+
+        let _ = encode(
+            &mut encoder,
+            vec![self::header("my-password", "not-so-secret")],
+        );
+
+        let name = "my-password".parse().unwrap();
+        let mut value = HeaderValue::from_bytes(b"12345").unwrap();
+        value.set_sensitive(true);
+
+        let header = Header::Field {
+            name: Some(name),
+            value,
+        };
+        let res = encode(&mut encoder, vec![header]);
+
+        assert_eq!(&[0b11111, 47], &res[..2]);
+        assert_eq!(0x80 | 4, res[2]);
+        assert_eq!("12345", huff_decode(&res[3..]));
+    }
+
+    #[test]
+    fn test_content_length_value_not_indexed() {
+        let mut encoder = Encoder::default();
+        let res = encode(&mut encoder, vec![header("content-length", "1234")]);
+
+        assert_eq!(&[15, 13, 0x80 | 3], &res[0..3]);
+        assert_eq!("1234", huff_decode(&res[3..]));
+        assert_eq!(6, res.len());
+    }
+
+    #[test]
+    fn test_encoding_headers_with_same_name() {
+        let mut encoder = Encoder::default();
+        let name = "hello";
+
+        // Encode first one
+        let _ = encode(&mut encoder, vec![header(name, "one")]);
+
+        // Encode second one
+        let res = encode(&mut encoder, vec![header(name, "two")]);
+        assert_eq!(&[0x40 | 62, 0x80 | 3], &res[0..2]);
+        assert_eq!("two", huff_decode(&res[2..]));
+        assert_eq!(5, res.len());
+
+        // Encode the first one again
+        let res = encode(&mut encoder, vec![header(name, "one")]);
+        assert_eq!(&[0x80 | 63], &res[..]);
+
+        // Now the second one
+        let res = encode(&mut encoder, vec![header(name, "two")]);
+        assert_eq!(&[0x80 | 62], &res[..]);
+    }
+
+    #[test]
+    fn test_evicting_headers_when_multiple_of_same_name_are_in_table() {
+        // The encoder only has space for 2 headers
+        let mut encoder = Encoder::new(76, 0);
+
+        let _ = encode(&mut encoder, vec![header("foo", "bar")]);
+        assert_eq!(1, encoder.table.len());
+
+        let _ = encode(&mut encoder, vec![header("bar", "foo")]);
+        assert_eq!(2, encoder.table.len());
+
+        // This will evict the first header, while still referencing the header
+        // name
+        let res = encode(&mut encoder, vec![header("foo", "baz")]);
+        assert_eq!(&[0x40 | 63, 0, 0x80 | 3], &res[..3]);
+        assert_eq!(2, encoder.table.len());
+
+        // Try adding the same header again
+        let res = encode(&mut encoder, vec![header("foo", "baz")]);
+        assert_eq!(&[0x80 | 62], &res[..]);
+        assert_eq!(2, encoder.table.len());
+    }
+
+    #[test]
+    fn test_max_size_zero() {
+        // Static table only
+        let mut encoder = Encoder::new(0, 0);
+        let res = encode(&mut encoder, vec![method("GET")]);
+        assert_eq!(*res, [0x80 | 2]);
+        assert_eq!(encoder.table.len(), 0);
+
+        let res = encode(&mut encoder, vec![header("foo", "bar")]);
+        assert_eq!(&[0, 0x80 | 2], &res[..2]);
+        assert_eq!("foo", huff_decode(&res[2..4]));
+        assert_eq!(0x80 | 3, res[4]);
+        assert_eq!("bar", huff_decode(&res[5..8]));
+        assert_eq!(0, encoder.table.len());
+
+        // Encode a custom value
+        let res = encode(&mut encoder, vec![header("transfer-encoding", "chunked")]);
+        assert_eq!(&[15, 42, 0x80 | 6], &res[..3]);
+        assert_eq!("chunked", huff_decode(&res[3..]));
+    }
+
+    #[test]
+    fn test_update_max_size_combos() {
+        let mut encoder = Encoder::default();
+        assert!(encoder.size_update.is_none());
+        assert_eq!(4096, encoder.table.max_size());
+
+        encoder.update_max_size(4096); // Default size
+        assert!(encoder.size_update.is_none());
+
+        encoder.update_max_size(0);
+        assert_eq!(Some(SizeUpdate::One(0)), encoder.size_update);
+
+        encoder.update_max_size(100);
+        assert_eq!(Some(SizeUpdate::Two(0, 100)), encoder.size_update);
+
+        let mut encoder = Encoder::default();
+        encoder.update_max_size(8000);
+        assert_eq!(Some(SizeUpdate::One(8000)), encoder.size_update);
+
+        encoder.update_max_size(100);
+        assert_eq!(Some(SizeUpdate::One(100)), encoder.size_update);
+
+        encoder.update_max_size(8000);
+        assert_eq!(Some(SizeUpdate::Two(100, 8000)), encoder.size_update);
+
+        encoder.update_max_size(4000);
+        assert_eq!(Some(SizeUpdate::Two(100, 4000)), encoder.size_update);
+
+        encoder.update_max_size(50);
+        assert_eq!(Some(SizeUpdate::One(50)), encoder.size_update);
+    }
+
+    #[test]
+    fn test_resizing_table() {
+        let mut encoder = Encoder::default();
+
+        // Add a header
+        let _ = encode(&mut encoder, vec![header("foo", "bar")]);
+
+        encoder.update_max_size(1);
+        assert_eq!(1, encoder.table.len());
+
+        let res = encode(&mut encoder, vec![method("GET")]);
+        assert_eq!(&[32 | 1, 0x80 | 2], &res[..]);
+        assert_eq!(0, encoder.table.len());
+
+        let res = encode(&mut encoder, vec![header("foo", "bar")]);
+        assert_eq!(0, res[0]);
+
+        encoder.update_max_size(100);
+        let res = encode(&mut encoder, vec![header("foo", "bar")]);
+        assert_eq!(&[32 | 31, 69, 64], &res[..3]);
+
+        encoder.update_max_size(0);
+        let res = encode(&mut encoder, vec![header("foo", "bar")]);
+        assert_eq!(&[32, 0], &res[..2]);
+    }
+
+    #[test]
+    fn test_decreasing_table_size_without_eviction() {
+        let mut encoder = Encoder::default();
+
+        // Add a header
+        let _ = encode(&mut encoder, vec![header("foo", "bar")]);
+
+        encoder.update_max_size(100);
+        assert_eq!(1, encoder.table.len());
+
+        let res = encode(&mut encoder, vec![header("foo", "bar")]);
+        assert_eq!(&[32 | 31, 69, 0x80 | 62], &res[..]);
+    }
+
+    #[test]
+    fn test_nameless_header() {
+        let mut encoder = Encoder::default();
+
+        let res = encode(
+            &mut encoder,
+            vec![
+                Header::Field {
+                    name: Some("hello".parse().unwrap()),
+                    value: HeaderValue::from_bytes(b"world").unwrap(),
+                },
+                Header::Field {
+                    name: None,
+                    value: HeaderValue::from_bytes(b"zomg").unwrap(),
+                },
+            ],
+        );
+
+        assert_eq!(&[0x40, 0x80 | 4], &res[0..2]);
+        assert_eq!("hello", huff_decode(&res[2..6]));
+        assert_eq!(0x80 | 4, res[6]);
+        assert_eq!("world", huff_decode(&res[7..11]));
+
+        // Next is not indexed
+        assert_eq!(&[15, 47, 0x80 | 3], &res[11..14]);
+        assert_eq!("zomg", huff_decode(&res[14..]));
+    }
+
+    #[test]
+    fn test_large_size_update() {
+        let mut encoder = Encoder::default();
+
+        encoder.update_max_size(1912930560);
+        assert_eq!(Some(SizeUpdate::One(1912930560)), encoder.size_update);
+
+        let mut dst = BytesMut::with_capacity(6);
+        encoder.encode_size_updates(&mut dst);
+        assert_eq!([63, 225, 129, 148, 144, 7], &dst[..]);
+    }
+
+    #[test]
+    #[ignore]
+    fn test_evicted_overflow() {
+        // Not sure what the best way to do this is.
+    }
+
+    fn encode(e: &mut Encoder, hdrs: Vec<Header<Option<HeaderName>>>) -> BytesMut {
+        let mut dst = BytesMut::with_capacity(1024);
+        e.encode(&mut hdrs.into_iter(), &mut dst);
+        dst
+    }
+
+    fn method(s: &str) -> Header<Option<HeaderName>> {
+        Header::Method(Method::from_bytes(s.as_bytes()).unwrap())
+    }
+
+    fn header(name: &str, val: &str) -> Header<Option<HeaderName>> {
+        let name = HeaderName::from_bytes(name.as_bytes()).unwrap();
+        let value = HeaderValue::from_bytes(val.as_bytes()).unwrap();
+
+        Header::Field {
+            name: Some(name),
+            value,
+        }
+    }
+
+    fn huff_decode(src: &[u8]) -> BytesMut {
+        let mut buf = BytesMut::new();
+        huffman::decode(src, &mut buf).unwrap()
+    }
+}
diff --git a/src/hpack/header.rs b/src/hpack/header.rs
new file mode 100644
index 0000000..0b5d1fd
--- /dev/null
+++ b/src/hpack/header.rs
@@ -0,0 +1,308 @@
+use super::{DecoderError, NeedMore};
+use crate::ext::Protocol;
+
+use bytes::Bytes;
+use http::header::{HeaderName, HeaderValue};
+use http::{Method, StatusCode};
+use std::fmt;
+
+/// HTTP/2 Header
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum Header<T = HeaderName> {
+    Field { name: T, value: HeaderValue },
+    // TODO: Change these types to `http::uri` types.
+    Authority(BytesStr),
+    Method(Method),
+    Scheme(BytesStr),
+    Path(BytesStr),
+    Protocol(Protocol),
+    Status(StatusCode),
+}
+
+/// The header field name
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+pub enum Name<'a> {
+    Field(&'a HeaderName),
+    Authority,
+    Method,
+    Scheme,
+    Path,
+    Protocol,
+    Status,
+}
+
+#[doc(hidden)]
+#[derive(Clone, Eq, PartialEq, Default)]
+pub struct BytesStr(Bytes);
+
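+/// Returns the size of a header-table entry for the given name and value:
+/// the name length plus the value length plus the 32-octet overhead defined
+/// in RFC 7541, Section 4.1.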
+pub fn len(name: &HeaderName, value: &HeaderValue) -> usize {
+    let n: &str = name.as_ref();
+    32 + n.len() + value.len()
+}
+
+impl Header<Option<HeaderName>> {
+    pub fn reify(self) -> Result<Header, HeaderValue> {
+        use self::Header::*;
+
+        Ok(match self {
+            Field {
+                name: Some(n),
+                value,
+            } => Field { name: n, value },
+            Field { name: None, value } => return Err(value),
+            Authority(v) => Authority(v),
+            Method(v) => Method(v),
+            Scheme(v) => Scheme(v),
+            Path(v) => Path(v),
+            Protocol(v) => Protocol(v),
+            Status(v) => Status(v),
+        })
+    }
+}
+
+impl Header {
+    pub fn new(name: Bytes, value: Bytes) -> Result<Header, DecoderError> {
+        if name.is_empty() {
+            return Err(DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream));
+        }
+        if name[0] == b':' {
+            match &name[1..] {
+                b"authority" => {
+                    let value = BytesStr::try_from(value)?;
+                    Ok(Header::Authority(value))
+                }
+                b"method" => {
+                    let method = Method::from_bytes(&value)?;
+                    Ok(Header::Method(method))
+                }
+                b"scheme" => {
+                    let value = BytesStr::try_from(value)?;
+                    Ok(Header::Scheme(value))
+                }
+                b"path" => {
+                    let value = BytesStr::try_from(value)?;
+                    Ok(Header::Path(value))
+                }
+                b"protocol" => {
+                    let value = Protocol::try_from(value)?;
+                    Ok(Header::Protocol(value))
+                }
+                b"status" => {
+                    let status = StatusCode::from_bytes(&value)?;
+                    Ok(Header::Status(status))
+                }
+                _ => Err(DecoderError::InvalidPseudoheader),
+            }
+        } else {
+            // HTTP/2 requires lower case header names
+            let name = HeaderName::from_lowercase(&name)?;
+            let value = HeaderValue::from_bytes(&value)?;
+
+            Ok(Header::Field { name, value })
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        match *self {
+            Header::Field {
+                ref name,
+                ref value,
+            } => len(name, value),
+            Header::Authority(ref v) => 32 + 10 + v.len(),
+            Header::Method(ref v) => 32 + 7 + v.as_ref().len(),
+            Header::Scheme(ref v) => 32 + 7 + v.len(),
+            Header::Path(ref v) => 32 + 5 + v.len(),
+            Header::Protocol(ref v) => 32 + 9 + v.as_str().len(),
+            Header::Status(_) => 32 + 7 + 3,
+        }
+    }
+
+    /// Returns the header name
+    pub fn name(&self) -> Name {
+        match *self {
+            Header::Field { ref name, .. } => Name::Field(name),
+            Header::Authority(..) => Name::Authority,
+            Header::Method(..) => Name::Method,
+            Header::Scheme(..) => Name::Scheme,
+            Header::Path(..) => Name::Path,
+            Header::Protocol(..) => Name::Protocol,
+            Header::Status(..) => Name::Status,
+        }
+    }
+
+    pub fn value_slice(&self) -> &[u8] {
+        match *self {
+            Header::Field { ref value, .. } => value.as_ref(),
+            Header::Authority(ref v) => v.as_ref(),
+            Header::Method(ref v) => v.as_ref().as_ref(),
+            Header::Scheme(ref v) => v.as_ref(),
+            Header::Path(ref v) => v.as_ref(),
+            Header::Protocol(ref v) => v.as_ref(),
+            Header::Status(ref v) => v.as_str().as_ref(),
+        }
+    }
+
+    pub fn value_eq(&self, other: &Header) -> bool {
+        match *self {
+            Header::Field { ref value, .. } => {
+                let a = value;
+                match *other {
+                    Header::Field { ref value, .. } => a == value,
+                    _ => false,
+                }
+            }
+            Header::Authority(ref a) => match *other {
+                Header::Authority(ref b) => a == b,
+                _ => false,
+            },
+            Header::Method(ref a) => match *other {
+                Header::Method(ref b) => a == b,
+                _ => false,
+            },
+            Header::Scheme(ref a) => match *other {
+                Header::Scheme(ref b) => a == b,
+                _ => false,
+            },
+            Header::Path(ref a) => match *other {
+                Header::Path(ref b) => a == b,
+                _ => false,
+            },
+            Header::Protocol(ref a) => match *other {
+                Header::Protocol(ref b) => a == b,
+                _ => false,
+            },
+            Header::Status(ref a) => match *other {
+                Header::Status(ref b) => a == b,
+                _ => false,
+            },
+        }
+    }
+
+    pub fn is_sensitive(&self) -> bool {
+        match *self {
+            Header::Field { ref value, .. } => value.is_sensitive(),
+            // TODO: Technically these other header values can be sensitive too.
+            _ => false,
+        }
+    }
+
+    pub fn skip_value_index(&self) -> bool {
+        use http::header;
+
+        match *self {
+            Header::Field { ref name, .. } => matches!(
+                *name,
+                header::AGE
+                    | header::AUTHORIZATION
+                    | header::CONTENT_LENGTH
+                    | header::ETAG
+                    | header::IF_MODIFIED_SINCE
+                    | header::IF_NONE_MATCH
+                    | header::LOCATION
+                    | header::COOKIE
+                    | header::SET_COOKIE
+            ),
+            Header::Path(..) => true,
+            _ => false,
+        }
+    }
+}
+
+// Mostly for tests
+impl From<Header> for Header<Option<HeaderName>> {
+    fn from(src: Header) -> Self {
+        match src {
+            Header::Field { name, value } => Header::Field {
+                name: Some(name),
+                value,
+            },
+            Header::Authority(v) => Header::Authority(v),
+            Header::Method(v) => Header::Method(v),
+            Header::Scheme(v) => Header::Scheme(v),
+            Header::Path(v) => Header::Path(v),
+            Header::Protocol(v) => Header::Protocol(v),
+            Header::Status(v) => Header::Status(v),
+        }
+    }
+}
+
+impl<'a> Name<'a> {
+    pub fn into_entry(self, value: Bytes) -> Result<Header, DecoderError> {
+        match self {
+            Name::Field(name) => Ok(Header::Field {
+                name: name.clone(),
+                value: HeaderValue::from_bytes(&value)?,
+            }),
+            Name::Authority => Ok(Header::Authority(BytesStr::try_from(value)?)),
+            Name::Method => Ok(Header::Method(Method::from_bytes(&value)?)),
+            Name::Scheme => Ok(Header::Scheme(BytesStr::try_from(value)?)),
+            Name::Path => Ok(Header::Path(BytesStr::try_from(value)?)),
+            Name::Protocol => Ok(Header::Protocol(Protocol::try_from(value)?)),
+            Name::Status => {
+                match StatusCode::from_bytes(&value) {
+                    Ok(status) => Ok(Header::Status(status)),
+                    // TODO: better error handling
+                    Err(_) => Err(DecoderError::InvalidStatusCode),
+                }
+            }
+        }
+    }
+
+    pub fn as_slice(&self) -> &[u8] {
+        match *self {
+            Name::Field(ref name) => name.as_ref(),
+            Name::Authority => b":authority",
+            Name::Method => b":method",
+            Name::Scheme => b":scheme",
+            Name::Path => b":path",
+            Name::Protocol => b":protocol",
+            Name::Status => b":status",
+        }
+    }
+}
+
+// ===== impl BytesStr =====
+
+impl BytesStr {
+    pub(crate) const fn from_static(value: &'static str) -> Self {
+        BytesStr(Bytes::from_static(value.as_bytes()))
+    }
+
+    pub(crate) fn from(value: &str) -> Self {
+        BytesStr(Bytes::copy_from_slice(value.as_bytes()))
+    }
+
+    #[doc(hidden)]
+    pub fn try_from(bytes: Bytes) -> Result<Self, std::str::Utf8Error> {
+        std::str::from_utf8(bytes.as_ref())?;
+        Ok(BytesStr(bytes))
+    }
+
+    pub(crate) fn as_str(&self) -> &str {
+        // Safety: check valid utf-8 in constructor
+        unsafe { std::str::from_utf8_unchecked(self.0.as_ref()) }
+    }
+
+    pub(crate) fn into_inner(self) -> Bytes {
+        self.0
+    }
+}
+
+impl std::ops::Deref for BytesStr {
+    type Target = str;
+    fn deref(&self) -> &str {
+        self.as_str()
+    }
+}
+
+impl AsRef<[u8]> for BytesStr {
+    fn as_ref(&self) -> &[u8] {
+        self.0.as_ref()
+    }
+}
+
+impl fmt::Debug for BytesStr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
diff --git a/src/hpack/huffman/mod.rs b/src/hpack/huffman/mod.rs
new file mode 100644
index 0000000..86c97eb
--- /dev/null
+++ b/src/hpack/huffman/mod.rs
@@ -0,0 +1,199 @@
+mod table;
+
+use self::table::{DECODE_TABLE, ENCODE_TABLE};
+use crate::hpack::DecoderError;
+
+use bytes::{BufMut, BytesMut};
+
+// Constructed in the generated `table.rs` file
+struct Decoder {
+    state: usize,
+    maybe_eos: bool,
+}
+
+// These flags must match the ones in genhuff.rs
+
+const MAYBE_EOS: u8 = 1;
+const DECODED: u8 = 2;
+const ERROR: u8 = 4;
+
+pub fn decode(src: &[u8], buf: &mut BytesMut) -> Result<BytesMut, DecoderError> {
+    let mut decoder = Decoder::new();
+
+    // The shortest huffman code is 5 bits, so the decoded output is at most
+    // 8/5 of the input; reserving twice the input length is always enough.
+    buf.reserve(src.len() << 1);
+
+    for b in src {
+        if let Some(b) = decoder.decode4(b >> 4)? {
+            buf.put_u8(b);
+        }
+
+        if let Some(b) = decoder.decode4(b & 0xf)? {
+            buf.put_u8(b);
+        }
+    }
+
+    if !decoder.is_final() {
+        return Err(DecoderError::InvalidHuffmanCode);
+    }
+
+    Ok(buf.split())
+}
+
+pub fn encode(src: &[u8], dst: &mut BytesMut) {
+    let mut bits: u64 = 0;
+    let mut bits_left = 40;
+
+    for &b in src {
+        let (nbits, code) = ENCODE_TABLE[b as usize];
+
+        bits |= code << (bits_left - nbits);
+        bits_left -= nbits;
+
+        while bits_left <= 32 {
+            dst.put_u8((bits >> 32) as u8);
+
+            bits <<= 8;
+            bits_left += 8;
+        }
+    }
+
+    if bits_left != 40 {
+        // Pad with the most significant bits of the EOS symbol (all ones)
+        bits |= (1 << bits_left) - 1;
+        dst.put_u8((bits >> 32) as u8);
+    }
+}
+
+impl Decoder {
+    fn new() -> Decoder {
+        Decoder {
+            state: 0,
+            maybe_eos: false,
+        }
+    }
+
+    // Decodes 4 bits
+    fn decode4(&mut self, input: u8) -> Result<Option<u8>, DecoderError> {
+        // (next-state, byte, flags)
+        let (next, byte, flags) = DECODE_TABLE[self.state][input as usize];
+
+        if flags & ERROR == ERROR {
+            // Data followed the EOS marker
+            return Err(DecoderError::InvalidHuffmanCode);
+        }
+
+        let mut ret = None;
+
+        if flags & DECODED == DECODED {
+            ret = Some(byte);
+        }
+
+        self.state = next;
+        self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS;
+
+        Ok(ret)
+    }
+
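+    // A huffman-coded string may end with padding made of the most significant
+    // bits of the EOS symbol (RFC 7541, Section 5.2), so the input is valid if
+    // it ends exactly on a symbol boundary or within such padding.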
+    fn is_final(&self) -> bool {
+        self.state == 0 || self.maybe_eos
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    fn decode(src: &[u8]) -> Result<BytesMut, DecoderError> {
+        let mut buf = BytesMut::new();
+        super::decode(src, &mut buf)
+    }
+
+    #[test]
+    fn decode_single_byte() {
+        assert_eq!("o", decode(&[0b00111111]).unwrap());
+        assert_eq!("0", decode(&[7]).unwrap());
+        assert_eq!("A", decode(&[(0x21 << 2) + 3]).unwrap());
+    }
+
+    #[test]
+    fn single_char_multi_byte() {
+        assert_eq!("#", decode(&[255, 160 + 15]).unwrap());
+        assert_eq!("$", decode(&[255, 200 + 7]).unwrap());
+        assert_eq!("\x0a", decode(&[255, 255, 255, 240 + 3]).unwrap());
+    }
+
+    #[test]
+    fn multi_char() {
+        assert_eq!("!0", decode(&[254, 1]).unwrap());
+        assert_eq!(" !", decode(&[0b01010011, 0b11111000]).unwrap());
+    }
+
+    #[test]
+    fn encode_single_byte() {
+        let mut dst = BytesMut::with_capacity(1);
+
+        encode(b"o", &mut dst);
+        assert_eq!(&dst[..], &[0b00111111]);
+
+        dst.clear();
+        encode(b"0", &mut dst);
+        assert_eq!(&dst[..], &[7]);
+
+        dst.clear();
+        encode(b"A", &mut dst);
+        assert_eq!(&dst[..], &[(0x21 << 2) + 3]);
+    }
+
+    #[test]
+    fn encode_decode_str() {
+        const DATA: &[&str] = &[
+            "hello world",
+            ":method",
+            ":scheme",
+            ":authority",
+            "yahoo.co.jp",
+            "GET",
+            "http",
+            ":path",
+            "/images/top/sp2/cmn/logo-ns-130528.png",
+            "example.com",
+            "hpack-test",
+            "xxxxxxx1",
+            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20100101 Firefox/16.0",
+            "accept",
+            "Accept",
+            "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+            "cookie",
+            "B=76j09a189a6h4&b=3&s=0b",
+            "TE",
+            "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi non bibendum libero. \
+             Etiam ultrices lorem ut.",
+        ];
+
+        for s in DATA {
+            let mut dst = BytesMut::with_capacity(s.len());
+
+            encode(s.as_bytes(), &mut dst);
+
+            let decoded = decode(&dst).unwrap();
+
+            assert_eq!(&decoded[..], s.as_bytes());
+        }
+    }
+
+    #[test]
+    fn encode_decode_u8() {
+        const DATA: &[&[u8]] = &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"];
+
+        for s in DATA {
+            let mut dst = BytesMut::with_capacity(s.len());
+
+            encode(s, &mut dst);
+
+            let decoded = decode(&dst).unwrap();
+
+            assert_eq!(&decoded[..], &s[..]);
+        }
+    }
+}
diff --git a/src/hpack/huffman/table.rs b/src/hpack/huffman/table.rs
new file mode 100644
index 0000000..560cfaf
--- /dev/null
+++ b/src/hpack/huffman/table.rs
@@ -0,0 +1,5130 @@
+// !!! DO NOT EDIT !!! Generated by util/genhuff/src/main.rs
+
+// (num-bits, bits)
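+// Indexed by byte value; the extra 257th entry is the 30-bit all-ones EOS
+// symbol.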
+pub const ENCODE_TABLE: [(usize, u64); 257] = [
+    (13, 0x1ff8),
+    (23, 0x007f_ffd8),
+    (28, 0x0fff_ffe2),
+    (28, 0x0fff_ffe3),
+    (28, 0x0fff_ffe4),
+    (28, 0x0fff_ffe5),
+    (28, 0x0fff_ffe6),
+    (28, 0x0fff_ffe7),
+    (28, 0x0fff_ffe8),
+    (24, 0x00ff_ffea),
+    (30, 0x3fff_fffc),
+    (28, 0x0fff_ffe9),
+    (28, 0x0fff_ffea),
+    (30, 0x3fff_fffd),
+    (28, 0x0fff_ffeb),
+    (28, 0x0fff_ffec),
+    (28, 0x0fff_ffed),
+    (28, 0x0fff_ffee),
+    (28, 0x0fff_ffef),
+    (28, 0x0fff_fff0),
+    (28, 0x0fff_fff1),
+    (28, 0x0fff_fff2),
+    (30, 0x3fff_fffe),
+    (28, 0x0fff_fff3),
+    (28, 0x0fff_fff4),
+    (28, 0x0fff_fff5),
+    (28, 0x0fff_fff6),
+    (28, 0x0fff_fff7),
+    (28, 0x0fff_fff8),
+    (28, 0x0fff_fff9),
+    (28, 0x0fff_fffa),
+    (28, 0x0fff_fffb),
+    (6, 0x14),
+    (10, 0x3f8),
+    (10, 0x3f9),
+    (12, 0xffa),
+    (13, 0x1ff9),
+    (6, 0x15),
+    (8, 0xf8),
+    (11, 0x7fa),
+    (10, 0x3fa),
+    (10, 0x3fb),
+    (8, 0xf9),
+    (11, 0x7fb),
+    (8, 0xfa),
+    (6, 0x16),
+    (6, 0x17),
+    (6, 0x18),
+    (5, 0x0),
+    (5, 0x1),
+    (5, 0x2),
+    (6, 0x19),
+    (6, 0x1a),
+    (6, 0x1b),
+    (6, 0x1c),
+    (6, 0x1d),
+    (6, 0x1e),
+    (6, 0x1f),
+    (7, 0x5c),
+    (8, 0xfb),
+    (15, 0x7ffc),
+    (6, 0x20),
+    (12, 0xffb),
+    (10, 0x3fc),
+    (13, 0x1ffa),
+    (6, 0x21),
+    (7, 0x5d),
+    (7, 0x5e),
+    (7, 0x5f),
+    (7, 0x60),
+    (7, 0x61),
+    (7, 0x62),
+    (7, 0x63),
+    (7, 0x64),
+    (7, 0x65),
+    (7, 0x66),
+    (7, 0x67),
+    (7, 0x68),
+    (7, 0x69),
+    (7, 0x6a),
+    (7, 0x6b),
+    (7, 0x6c),
+    (7, 0x6d),
+    (7, 0x6e),
+    (7, 0x6f),
+    (7, 0x70),
+    (7, 0x71),
+    (7, 0x72),
+    (8, 0xfc),
+    (7, 0x73),
+    (8, 0xfd),
+    (13, 0x1ffb),
+    (19, 0x7fff0),
+    (13, 0x1ffc),
+    (14, 0x3ffc),
+    (6, 0x22),
+    (15, 0x7ffd),
+    (5, 0x3),
+    (6, 0x23),
+    (5, 0x4),
+    (6, 0x24),
+    (5, 0x5),
+    (6, 0x25),
+    (6, 0x26),
+    (6, 0x27),
+    (5, 0x6),
+    (7, 0x74),
+    (7, 0x75),
+    (6, 0x28),
+    (6, 0x29),
+    (6, 0x2a),
+    (5, 0x7),
+    (6, 0x2b),
+    (7, 0x76),
+    (6, 0x2c),
+    (5, 0x8),
+    (5, 0x9),
+    (6, 0x2d),
+    (7, 0x77),
+    (7, 0x78),
+    (7, 0x79),
+    (7, 0x7a),
+    (7, 0x7b),
+    (15, 0x7ffe),
+    (11, 0x7fc),
+    (14, 0x3ffd),
+    (13, 0x1ffd),
+    (28, 0x0fff_fffc),
+    (20, 0xfffe6),
+    (22, 0x003f_ffd2),
+    (20, 0xfffe7),
+    (20, 0xfffe8),
+    (22, 0x003f_ffd3),
+    (22, 0x003f_ffd4),
+    (22, 0x003f_ffd5),
+    (23, 0x007f_ffd9),
+    (22, 0x003f_ffd6),
+    (23, 0x007f_ffda),
+    (23, 0x007f_ffdb),
+    (23, 0x007f_ffdc),
+    (23, 0x007f_ffdd),
+    (23, 0x007f_ffde),
+    (24, 0x00ff_ffeb),
+    (23, 0x007f_ffdf),
+    (24, 0x00ff_ffec),
+    (24, 0x00ff_ffed),
+    (22, 0x003f_ffd7),
+    (23, 0x007f_ffe0),
+    (24, 0x00ff_ffee),
+    (23, 0x007f_ffe1),
+    (23, 0x007f_ffe2),
+    (23, 0x007f_ffe3),
+    (23, 0x007f_ffe4),
+    (21, 0x001f_ffdc),
+    (22, 0x003f_ffd8),
+    (23, 0x007f_ffe5),
+    (22, 0x003f_ffd9),
+    (23, 0x007f_ffe6),
+    (23, 0x007f_ffe7),
+    (24, 0x00ff_ffef),
+    (22, 0x003f_ffda),
+    (21, 0x001f_ffdd),
+    (20, 0xfffe9),
+    (22, 0x003f_ffdb),
+    (22, 0x003f_ffdc),
+    (23, 0x007f_ffe8),
+    (23, 0x007f_ffe9),
+    (21, 0x001f_ffde),
+    (23, 0x007f_ffea),
+    (22, 0x003f_ffdd),
+    (22, 0x003f_ffde),
+    (24, 0x00ff_fff0),
+    (21, 0x001f_ffdf),
+    (22, 0x003f_ffdf),
+    (23, 0x007f_ffeb),
+    (23, 0x007f_ffec),
+    (21, 0x001f_ffe0),
+    (21, 0x001f_ffe1),
+    (22, 0x003f_ffe0),
+    (21, 0x001f_ffe2),
+    (23, 0x007f_ffed),
+    (22, 0x003f_ffe1),
+    (23, 0x007f_ffee),
+    (23, 0x007f_ffef),
+    (20, 0xfffea),
+    (22, 0x003f_ffe2),
+    (22, 0x003f_ffe3),
+    (22, 0x003f_ffe4),
+    (23, 0x007f_fff0),
+    (22, 0x003f_ffe5),
+    (22, 0x003f_ffe6),
+    (23, 0x007f_fff1),
+    (26, 0x03ff_ffe0),
+    (26, 0x03ff_ffe1),
+    (20, 0xfffeb),
+    (19, 0x7fff1),
+    (22, 0x003f_ffe7),
+    (23, 0x007f_fff2),
+    (22, 0x003f_ffe8),
+    (25, 0x01ff_ffec),
+    (26, 0x03ff_ffe2),
+    (26, 0x03ff_ffe3),
+    (26, 0x03ff_ffe4),
+    (27, 0x07ff_ffde),
+    (27, 0x07ff_ffdf),
+    (26, 0x03ff_ffe5),
+    (24, 0x00ff_fff1),
+    (25, 0x01ff_ffed),
+    (19, 0x7fff2),
+    (21, 0x001f_ffe3),
+    (26, 0x03ff_ffe6),
+    (27, 0x07ff_ffe0),
+    (27, 0x07ff_ffe1),
+    (26, 0x03ff_ffe7),
+    (27, 0x07ff_ffe2),
+    (24, 0x00ff_fff2),
+    (21, 0x001f_ffe4),
+    (21, 0x001f_ffe5),
+    (26, 0x03ff_ffe8),
+    (26, 0x03ff_ffe9),
+    (28, 0x0fff_fffd),
+    (27, 0x07ff_ffe3),
+    (27, 0x07ff_ffe4),
+    (27, 0x07ff_ffe5),
+    (20, 0xfffec),
+    (24, 0x00ff_fff3),
+    (20, 0xfffed),
+    (21, 0x001f_ffe6),
+    (22, 0x003f_ffe9),
+    (21, 0x001f_ffe7),
+    (21, 0x001f_ffe8),
+    (23, 0x007f_fff3),
+    (22, 0x003f_ffea),
+    (22, 0x003f_ffeb),
+    (25, 0x01ff_ffee),
+    (25, 0x01ff_ffef),
+    (24, 0x00ff_fff4),
+    (24, 0x00ff_fff5),
+    (26, 0x03ff_ffea),
+    (23, 0x007f_fff4),
+    (26, 0x03ff_ffeb),
+    (27, 0x07ff_ffe6),
+    (26, 0x03ff_ffec),
+    (26, 0x03ff_ffed),
+    (27, 0x07ff_ffe7),
+    (27, 0x07ff_ffe8),
+    (27, 0x07ff_ffe9),
+    (27, 0x07ff_ffea),
+    (27, 0x07ff_ffeb),
+    (28, 0x0fff_fffe),
+    (27, 0x07ff_ffec),
+    (27, 0x07ff_ffed),
+    (27, 0x07ff_ffee),
+    (27, 0x07ff_ffef),
+    (27, 0x07ff_fff0),
+    (26, 0x03ff_ffee),
+    (30, 0x3fff_ffff),
+];
+
+// (next-state, byte, flags)
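+// Indexed by [current decoder state][next 4 input bits]; the flags correspond
+// to the MAYBE_EOS / DECODED / ERROR bits used by the decoder.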
+pub const DECODE_TABLE: [[(usize, u8, u8); 16]; 256] = [
+    // 0
+    [
+        (4, 0, 0x00),
+        (5, 0, 0x00),
+        (7, 0, 0x00),
+        (8, 0, 0x00),
+        (11, 0, 0x00),
+        (12, 0, 0x00),
+        (16, 0, 0x00),
+        (19, 0, 0x00),
+        (25, 0, 0x00),
+        (28, 0, 0x00),
+        (32, 0, 0x00),
+        (35, 0, 0x00),
+        (42, 0, 0x00),
+        (49, 0, 0x00),
+        (57, 0, 0x00),
+        (64, 0, 0x01),
+    ],
+    // 1
+    [
+        (0, 48, 0x02),
+        (0, 49, 0x02),
+        (0, 50, 0x02),
+        (0, 97, 0x02),
+        (0, 99, 0x02),
+        (0, 101, 0x02),
+        (0, 105, 0x02),
+        (0, 111, 0x02),
+        (0, 115, 0x02),
+        (0, 116, 0x02),
+        (13, 0, 0x00),
+        (14, 0, 0x00),
+        (17, 0, 0x00),
+        (18, 0, 0x00),
+        (20, 0, 0x00),
+        (21, 0, 0x00),
+    ],
+    // 2
+    [
+        (1, 48, 0x02),
+        (22, 48, 0x03),
+        (1, 49, 0x02),
+        (22, 49, 0x03),
+        (1, 50, 0x02),
+        (22, 50, 0x03),
+        (1, 97, 0x02),
+        (22, 97, 0x03),
+        (1, 99, 0x02),
+        (22, 99, 0x03),
+        (1, 101, 0x02),
+        (22, 101, 0x03),
+        (1, 105, 0x02),
+        (22, 105, 0x03),
+        (1, 111, 0x02),
+        (22, 111, 0x03),
+    ],
+    // 3
+    [
+        (2, 48, 0x02),
+        (9, 48, 0x02),
+        (23, 48, 0x02),
+        (40, 48, 0x03),
+        (2, 49, 0x02),
+        (9, 49, 0x02),
+        (23, 49, 0x02),
+        (40, 49, 0x03),
+        (2, 50, 0x02),
+        (9, 50, 0x02),
+        (23, 50, 0x02),
+        (40, 50, 0x03),
+        (2, 97, 0x02),
+        (9, 97, 0x02),
+        (23, 97, 0x02),
+        (40, 97, 0x03),
+    ],
+    // 4
+    [
+        (3, 48, 0x02),
+        (6, 48, 0x02),
+        (10, 48, 0x02),
+        (15, 48, 0x02),
+        (24, 48, 0x02),
+        (31, 48, 0x02),
+        (41, 48, 0x02),
+        (56, 48, 0x03),
+        (3, 49, 0x02),
+        (6, 49, 0x02),
+        (10, 49, 0x02),
+        (15, 49, 0x02),
+        (24, 49, 0x02),
+        (31, 49, 0x02),
+        (41, 49, 0x02),
+        (56, 49, 0x03),
+    ],
+    // 5
+    [
+        (3, 50, 0x02),
+        (6, 50, 0x02),
+        (10, 50, 0x02),
+        (15, 50, 0x02),
+        (24, 50, 0x02),
+        (31, 50, 0x02),
+        (41, 50, 0x02),
+        (56, 50, 0x03),
+        (3, 97, 0x02),
+        (6, 97, 0x02),
+        (10, 97, 0x02),
+        (15, 97, 0x02),
+        (24, 97, 0x02),
+        (31, 97, 0x02),
+        (41, 97, 0x02),
+        (56, 97, 0x03),
+    ],
+    // 6
+    [
+        (2, 99, 0x02),
+        (9, 99, 0x02),
+        (23, 99, 0x02),
+        (40, 99, 0x03),
+        (2, 101, 0x02),
+        (9, 101, 0x02),
+        (23, 101, 0x02),
+        (40, 101, 0x03),
+        (2, 105, 0x02),
+        (9, 105, 0x02),
+        (23, 105, 0x02),
+        (40, 105, 0x03),
+        (2, 111, 0x02),
+        (9, 111, 0x02),
+        (23, 111, 0x02),
+        (40, 111, 0x03),
+    ],
+    // 7
+    [
+        (3, 99, 0x02),
+        (6, 99, 0x02),
+        (10, 99, 0x02),
+        (15, 99, 0x02),
+        (24, 99, 0x02),
+        (31, 99, 0x02),
+        (41, 99, 0x02),
+        (56, 99, 0x03),
+        (3, 101, 0x02),
+        (6, 101, 0x02),
+        (10, 101, 0x02),
+        (15, 101, 0x02),
+        (24, 101, 0x02),
+        (31, 101, 0x02),
+        (41, 101, 0x02),
+        (56, 101, 0x03),
+    ],
+    // 8
+    [
+        (3, 105, 0x02),
+        (6, 105, 0x02),
+        (10, 105, 0x02),
+        (15, 105, 0x02),
+        (24, 105, 0x02),
+        (31, 105, 0x02),
+        (41, 105, 0x02),
+        (56, 105, 0x03),
+        (3, 111, 0x02),
+        (6, 111, 0x02),
+        (10, 111, 0x02),
+        (15, 111, 0x02),
+        (24, 111, 0x02),
+        (31, 111, 0x02),
+        (41, 111, 0x02),
+        (56, 111, 0x03),
+    ],
+    // 9
+    [
+        (1, 115, 0x02),
+        (22, 115, 0x03),
+        (1, 116, 0x02),
+        (22, 116, 0x03),
+        (0, 32, 0x02),
+        (0, 37, 0x02),
+        (0, 45, 0x02),
+        (0, 46, 0x02),
+        (0, 47, 0x02),
+        (0, 51, 0x02),
+        (0, 52, 0x02),
+        (0, 53, 0x02),
+        (0, 54, 0x02),
+        (0, 55, 0x02),
+        (0, 56, 0x02),
+        (0, 57, 0x02),
+    ],
+    // 10
+    [
+        (2, 115, 0x02),
+        (9, 115, 0x02),
+        (23, 115, 0x02),
+        (40, 115, 0x03),
+        (2, 116, 0x02),
+        (9, 116, 0x02),
+        (23, 116, 0x02),
+        (40, 116, 0x03),
+        (1, 32, 0x02),
+        (22, 32, 0x03),
+        (1, 37, 0x02),
+        (22, 37, 0x03),
+        (1, 45, 0x02),
+        (22, 45, 0x03),
+        (1, 46, 0x02),
+        (22, 46, 0x03),
+    ],
+    // 11
+    [
+        (3, 115, 0x02),
+        (6, 115, 0x02),
+        (10, 115, 0x02),
+        (15, 115, 0x02),
+        (24, 115, 0x02),
+        (31, 115, 0x02),
+        (41, 115, 0x02),
+        (56, 115, 0x03),
+        (3, 116, 0x02),
+        (6, 116, 0x02),
+        (10, 116, 0x02),
+        (15, 116, 0x02),
+        (24, 116, 0x02),
+        (31, 116, 0x02),
+        (41, 116, 0x02),
+        (56, 116, 0x03),
+    ],
+    // 12
+    [
+        (2, 32, 0x02),
+        (9, 32, 0x02),
+        (23, 32, 0x02),
+        (40, 32, 0x03),
+        (2, 37, 0x02),
+        (9, 37, 0x02),
+        (23, 37, 0x02),
+        (40, 37, 0x03),
+        (2, 45, 0x02),
+        (9, 45, 0x02),
+        (23, 45, 0x02),
+        (40, 45, 0x03),
+        (2, 46, 0x02),
+        (9, 46, 0x02),
+        (23, 46, 0x02),
+        (40, 46, 0x03),
+    ],
+    // 13
+    [
+        (3, 32, 0x02),
+        (6, 32, 0x02),
+        (10, 32, 0x02),
+        (15, 32, 0x02),
+        (24, 32, 0x02),
+        (31, 32, 0x02),
+        (41, 32, 0x02),
+        (56, 32, 0x03),
+        (3, 37, 0x02),
+        (6, 37, 0x02),
+        (10, 37, 0x02),
+        (15, 37, 0x02),
+        (24, 37, 0x02),
+        (31, 37, 0x02),
+        (41, 37, 0x02),
+        (56, 37, 0x03),
+    ],
+    // 14
+    [
+        (3, 45, 0x02),
+        (6, 45, 0x02),
+        (10, 45, 0x02),
+        (15, 45, 0x02),
+        (24, 45, 0x02),
+        (31, 45, 0x02),
+        (41, 45, 0x02),
+        (56, 45, 0x03),
+        (3, 46, 0x02),
+        (6, 46, 0x02),
+        (10, 46, 0x02),
+        (15, 46, 0x02),
+        (24, 46, 0x02),
+        (31, 46, 0x02),
+        (41, 46, 0x02),
+        (56, 46, 0x03),
+    ],
+    // 15
+    [
+        (1, 47, 0x02),
+        (22, 47, 0x03),
+        (1, 51, 0x02),
+        (22, 51, 0x03),
+        (1, 52, 0x02),
+        (22, 52, 0x03),
+        (1, 53, 0x02),
+        (22, 53, 0x03),
+        (1, 54, 0x02),
+        (22, 54, 0x03),
+        (1, 55, 0x02),
+        (22, 55, 0x03),
+        (1, 56, 0x02),
+        (22, 56, 0x03),
+        (1, 57, 0x02),
+        (22, 57, 0x03),
+    ],
+    // 16
+    [
+        (2, 47, 0x02),
+        (9, 47, 0x02),
+        (23, 47, 0x02),
+        (40, 47, 0x03),
+        (2, 51, 0x02),
+        (9, 51, 0x02),
+        (23, 51, 0x02),
+        (40, 51, 0x03),
+        (2, 52, 0x02),
+        (9, 52, 0x02),
+        (23, 52, 0x02),
+        (40, 52, 0x03),
+        (2, 53, 0x02),
+        (9, 53, 0x02),
+        (23, 53, 0x02),
+        (40, 53, 0x03),
+    ],
+    // 17
+    [
+        (3, 47, 0x02),
+        (6, 47, 0x02),
+        (10, 47, 0x02),
+        (15, 47, 0x02),
+        (24, 47, 0x02),
+        (31, 47, 0x02),
+        (41, 47, 0x02),
+        (56, 47, 0x03),
+        (3, 51, 0x02),
+        (6, 51, 0x02),
+        (10, 51, 0x02),
+        (15, 51, 0x02),
+        (24, 51, 0x02),
+        (31, 51, 0x02),
+        (41, 51, 0x02),
+        (56, 51, 0x03),
+    ],
+    // 18
+    [
+        (3, 52, 0x02),
+        (6, 52, 0x02),
+        (10, 52, 0x02),
+        (15, 52, 0x02),
+        (24, 52, 0x02),
+        (31, 52, 0x02),
+        (41, 52, 0x02),
+        (56, 52, 0x03),
+        (3, 53, 0x02),
+        (6, 53, 0x02),
+        (10, 53, 0x02),
+        (15, 53, 0x02),
+        (24, 53, 0x02),
+        (31, 53, 0x02),
+        (41, 53, 0x02),
+        (56, 53, 0x03),
+    ],
+    // 19
+    [
+        (2, 54, 0x02),
+        (9, 54, 0x02),
+        (23, 54, 0x02),
+        (40, 54, 0x03),
+        (2, 55, 0x02),
+        (9, 55, 0x02),
+        (23, 55, 0x02),
+        (40, 55, 0x03),
+        (2, 56, 0x02),
+        (9, 56, 0x02),
+        (23, 56, 0x02),
+        (40, 56, 0x03),
+        (2, 57, 0x02),
+        (9, 57, 0x02),
+        (23, 57, 0x02),
+        (40, 57, 0x03),
+    ],
+    // 20
+    [
+        (3, 54, 0x02),
+        (6, 54, 0x02),
+        (10, 54, 0x02),
+        (15, 54, 0x02),
+        (24, 54, 0x02),
+        (31, 54, 0x02),
+        (41, 54, 0x02),
+        (56, 54, 0x03),
+        (3, 55, 0x02),
+        (6, 55, 0x02),
+        (10, 55, 0x02),
+        (15, 55, 0x02),
+        (24, 55, 0x02),
+        (31, 55, 0x02),
+        (41, 55, 0x02),
+        (56, 55, 0x03),
+    ],
+    // 21
+    [
+        (3, 56, 0x02),
+        (6, 56, 0x02),
+        (10, 56, 0x02),
+        (15, 56, 0x02),
+        (24, 56, 0x02),
+        (31, 56, 0x02),
+        (41, 56, 0x02),
+        (56, 56, 0x03),
+        (3, 57, 0x02),
+        (6, 57, 0x02),
+        (10, 57, 0x02),
+        (15, 57, 0x02),
+        (24, 57, 0x02),
+        (31, 57, 0x02),
+        (41, 57, 0x02),
+        (56, 57, 0x03),
+    ],
+    // 22
+    [
+        (26, 0, 0x00),
+        (27, 0, 0x00),
+        (29, 0, 0x00),
+        (30, 0, 0x00),
+        (33, 0, 0x00),
+        (34, 0, 0x00),
+        (36, 0, 0x00),
+        (37, 0, 0x00),
+        (43, 0, 0x00),
+        (46, 0, 0x00),
+        (50, 0, 0x00),
+        (53, 0, 0x00),
+        (58, 0, 0x00),
+        (61, 0, 0x00),
+        (65, 0, 0x00),
+        (68, 0, 0x01),
+    ],
+    // 23
+    [
+        (0, 61, 0x02),
+        (0, 65, 0x02),
+        (0, 95, 0x02),
+        (0, 98, 0x02),
+        (0, 100, 0x02),
+        (0, 102, 0x02),
+        (0, 103, 0x02),
+        (0, 104, 0x02),
+        (0, 108, 0x02),
+        (0, 109, 0x02),
+        (0, 110, 0x02),
+        (0, 112, 0x02),
+        (0, 114, 0x02),
+        (0, 117, 0x02),
+        (38, 0, 0x00),
+        (39, 0, 0x00),
+    ],
+    // 24
+    [
+        (1, 61, 0x02),
+        (22, 61, 0x03),
+        (1, 65, 0x02),
+        (22, 65, 0x03),
+        (1, 95, 0x02),
+        (22, 95, 0x03),
+        (1, 98, 0x02),
+        (22, 98, 0x03),
+        (1, 100, 0x02),
+        (22, 100, 0x03),
+        (1, 102, 0x02),
+        (22, 102, 0x03),
+        (1, 103, 0x02),
+        (22, 103, 0x03),
+        (1, 104, 0x02),
+        (22, 104, 0x03),
+    ],
+    // 25
+    [
+        (2, 61, 0x02),
+        (9, 61, 0x02),
+        (23, 61, 0x02),
+        (40, 61, 0x03),
+        (2, 65, 0x02),
+        (9, 65, 0x02),
+        (23, 65, 0x02),
+        (40, 65, 0x03),
+        (2, 95, 0x02),
+        (9, 95, 0x02),
+        (23, 95, 0x02),
+        (40, 95, 0x03),
+        (2, 98, 0x02),
+        (9, 98, 0x02),
+        (23, 98, 0x02),
+        (40, 98, 0x03),
+    ],
+    // 26
+    [
+        (3, 61, 0x02),
+        (6, 61, 0x02),
+        (10, 61, 0x02),
+        (15, 61, 0x02),
+        (24, 61, 0x02),
+        (31, 61, 0x02),
+        (41, 61, 0x02),
+        (56, 61, 0x03),
+        (3, 65, 0x02),
+        (6, 65, 0x02),
+        (10, 65, 0x02),
+        (15, 65, 0x02),
+        (24, 65, 0x02),
+        (31, 65, 0x02),
+        (41, 65, 0x02),
+        (56, 65, 0x03),
+    ],
+    // 27
+    [
+        (3, 95, 0x02),
+        (6, 95, 0x02),
+        (10, 95, 0x02),
+        (15, 95, 0x02),
+        (24, 95, 0x02),
+        (31, 95, 0x02),
+        (41, 95, 0x02),
+        (56, 95, 0x03),
+        (3, 98, 0x02),
+        (6, 98, 0x02),
+        (10, 98, 0x02),
+        (15, 98, 0x02),
+        (24, 98, 0x02),
+        (31, 98, 0x02),
+        (41, 98, 0x02),
+        (56, 98, 0x03),
+    ],
+    // 28
+    [
+        (2, 100, 0x02),
+        (9, 100, 0x02),
+        (23, 100, 0x02),
+        (40, 100, 0x03),
+        (2, 102, 0x02),
+        (9, 102, 0x02),
+        (23, 102, 0x02),
+        (40, 102, 0x03),
+        (2, 103, 0x02),
+        (9, 103, 0x02),
+        (23, 103, 0x02),
+        (40, 103, 0x03),
+        (2, 104, 0x02),
+        (9, 104, 0x02),
+        (23, 104, 0x02),
+        (40, 104, 0x03),
+    ],
+    // 29
+    [
+        (3, 100, 0x02),
+        (6, 100, 0x02),
+        (10, 100, 0x02),
+        (15, 100, 0x02),
+        (24, 100, 0x02),
+        (31, 100, 0x02),
+        (41, 100, 0x02),
+        (56, 100, 0x03),
+        (3, 102, 0x02),
+        (6, 102, 0x02),
+        (10, 102, 0x02),
+        (15, 102, 0x02),
+        (24, 102, 0x02),
+        (31, 102, 0x02),
+        (41, 102, 0x02),
+        (56, 102, 0x03),
+    ],
+    // 30
+    [
+        (3, 103, 0x02),
+        (6, 103, 0x02),
+        (10, 103, 0x02),
+        (15, 103, 0x02),
+        (24, 103, 0x02),
+        (31, 103, 0x02),
+        (41, 103, 0x02),
+        (56, 103, 0x03),
+        (3, 104, 0x02),
+        (6, 104, 0x02),
+        (10, 104, 0x02),
+        (15, 104, 0x02),
+        (24, 104, 0x02),
+        (31, 104, 0x02),
+        (41, 104, 0x02),
+        (56, 104, 0x03),
+    ],
+    // 31
+    [
+        (1, 108, 0x02),
+        (22, 108, 0x03),
+        (1, 109, 0x02),
+        (22, 109, 0x03),
+        (1, 110, 0x02),
+        (22, 110, 0x03),
+        (1, 112, 0x02),
+        (22, 112, 0x03),
+        (1, 114, 0x02),
+        (22, 114, 0x03),
+        (1, 117, 0x02),
+        (22, 117, 0x03),
+        (0, 58, 0x02),
+        (0, 66, 0x02),
+        (0, 67, 0x02),
+        (0, 68, 0x02),
+    ],
+    // 32
+    [
+        (2, 108, 0x02),
+        (9, 108, 0x02),
+        (23, 108, 0x02),
+        (40, 108, 0x03),
+        (2, 109, 0x02),
+        (9, 109, 0x02),
+        (23, 109, 0x02),
+        (40, 109, 0x03),
+        (2, 110, 0x02),
+        (9, 110, 0x02),
+        (23, 110, 0x02),
+        (40, 110, 0x03),
+        (2, 112, 0x02),
+        (9, 112, 0x02),
+        (23, 112, 0x02),
+        (40, 112, 0x03),
+    ],
+    // 33
+    [
+        (3, 108, 0x02),
+        (6, 108, 0x02),
+        (10, 108, 0x02),
+        (15, 108, 0x02),
+        (24, 108, 0x02),
+        (31, 108, 0x02),
+        (41, 108, 0x02),
+        (56, 108, 0x03),
+        (3, 109, 0x02),
+        (6, 109, 0x02),
+        (10, 109, 0x02),
+        (15, 109, 0x02),
+        (24, 109, 0x02),
+        (31, 109, 0x02),
+        (41, 109, 0x02),
+        (56, 109, 0x03),
+    ],
+    // 34
+    [
+        (3, 110, 0x02),
+        (6, 110, 0x02),
+        (10, 110, 0x02),
+        (15, 110, 0x02),
+        (24, 110, 0x02),
+        (31, 110, 0x02),
+        (41, 110, 0x02),
+        (56, 110, 0x03),
+        (3, 112, 0x02),
+        (6, 112, 0x02),
+        (10, 112, 0x02),
+        (15, 112, 0x02),
+        (24, 112, 0x02),
+        (31, 112, 0x02),
+        (41, 112, 0x02),
+        (56, 112, 0x03),
+    ],
+    // 35
+    [
+        (2, 114, 0x02),
+        (9, 114, 0x02),
+        (23, 114, 0x02),
+        (40, 114, 0x03),
+        (2, 117, 0x02),
+        (9, 117, 0x02),
+        (23, 117, 0x02),
+        (40, 117, 0x03),
+        (1, 58, 0x02),
+        (22, 58, 0x03),
+        (1, 66, 0x02),
+        (22, 66, 0x03),
+        (1, 67, 0x02),
+        (22, 67, 0x03),
+        (1, 68, 0x02),
+        (22, 68, 0x03),
+    ],
+    // 36
+    [
+        (3, 114, 0x02),
+        (6, 114, 0x02),
+        (10, 114, 0x02),
+        (15, 114, 0x02),
+        (24, 114, 0x02),
+        (31, 114, 0x02),
+        (41, 114, 0x02),
+        (56, 114, 0x03),
+        (3, 117, 0x02),
+        (6, 117, 0x02),
+        (10, 117, 0x02),
+        (15, 117, 0x02),
+        (24, 117, 0x02),
+        (31, 117, 0x02),
+        (41, 117, 0x02),
+        (56, 117, 0x03),
+    ],
+    // 37
+    [
+        (2, 58, 0x02),
+        (9, 58, 0x02),
+        (23, 58, 0x02),
+        (40, 58, 0x03),
+        (2, 66, 0x02),
+        (9, 66, 0x02),
+        (23, 66, 0x02),
+        (40, 66, 0x03),
+        (2, 67, 0x02),
+        (9, 67, 0x02),
+        (23, 67, 0x02),
+        (40, 67, 0x03),
+        (2, 68, 0x02),
+        (9, 68, 0x02),
+        (23, 68, 0x02),
+        (40, 68, 0x03),
+    ],
+    // 38
+    [
+        (3, 58, 0x02),
+        (6, 58, 0x02),
+        (10, 58, 0x02),
+        (15, 58, 0x02),
+        (24, 58, 0x02),
+        (31, 58, 0x02),
+        (41, 58, 0x02),
+        (56, 58, 0x03),
+        (3, 66, 0x02),
+        (6, 66, 0x02),
+        (10, 66, 0x02),
+        (15, 66, 0x02),
+        (24, 66, 0x02),
+        (31, 66, 0x02),
+        (41, 66, 0x02),
+        (56, 66, 0x03),
+    ],
+    // 39
+    [
+        (3, 67, 0x02),
+        (6, 67, 0x02),
+        (10, 67, 0x02),
+        (15, 67, 0x02),
+        (24, 67, 0x02),
+        (31, 67, 0x02),
+        (41, 67, 0x02),
+        (56, 67, 0x03),
+        (3, 68, 0x02),
+        (6, 68, 0x02),
+        (10, 68, 0x02),
+        (15, 68, 0x02),
+        (24, 68, 0x02),
+        (31, 68, 0x02),
+        (41, 68, 0x02),
+        (56, 68, 0x03),
+    ],
+    // 40
+    [
+        (44, 0, 0x00),
+        (45, 0, 0x00),
+        (47, 0, 0x00),
+        (48, 0, 0x00),
+        (51, 0, 0x00),
+        (52, 0, 0x00),
+        (54, 0, 0x00),
+        (55, 0, 0x00),
+        (59, 0, 0x00),
+        (60, 0, 0x00),
+        (62, 0, 0x00),
+        (63, 0, 0x00),
+        (66, 0, 0x00),
+        (67, 0, 0x00),
+        (69, 0, 0x00),
+        (72, 0, 0x01),
+    ],
+    // 41
+    [
+        (0, 69, 0x02),
+        (0, 70, 0x02),
+        (0, 71, 0x02),
+        (0, 72, 0x02),
+        (0, 73, 0x02),
+        (0, 74, 0x02),
+        (0, 75, 0x02),
+        (0, 76, 0x02),
+        (0, 77, 0x02),
+        (0, 78, 0x02),
+        (0, 79, 0x02),
+        (0, 80, 0x02),
+        (0, 81, 0x02),
+        (0, 82, 0x02),
+        (0, 83, 0x02),
+        (0, 84, 0x02),
+    ],
+    // 42
+    [
+        (1, 69, 0x02),
+        (22, 69, 0x03),
+        (1, 70, 0x02),
+        (22, 70, 0x03),
+        (1, 71, 0x02),
+        (22, 71, 0x03),
+        (1, 72, 0x02),
+        (22, 72, 0x03),
+        (1, 73, 0x02),
+        (22, 73, 0x03),
+        (1, 74, 0x02),
+        (22, 74, 0x03),
+        (1, 75, 0x02),
+        (22, 75, 0x03),
+        (1, 76, 0x02),
+        (22, 76, 0x03),
+    ],
+    // 43
+    [
+        (2, 69, 0x02),
+        (9, 69, 0x02),
+        (23, 69, 0x02),
+        (40, 69, 0x03),
+        (2, 70, 0x02),
+        (9, 70, 0x02),
+        (23, 70, 0x02),
+        (40, 70, 0x03),
+        (2, 71, 0x02),
+        (9, 71, 0x02),
+        (23, 71, 0x02),
+        (40, 71, 0x03),
+        (2, 72, 0x02),
+        (9, 72, 0x02),
+        (23, 72, 0x02),
+        (40, 72, 0x03),
+    ],
+    // 44
+    [
+        (3, 69, 0x02),
+        (6, 69, 0x02),
+        (10, 69, 0x02),
+        (15, 69, 0x02),
+        (24, 69, 0x02),
+        (31, 69, 0x02),
+        (41, 69, 0x02),
+        (56, 69, 0x03),
+        (3, 70, 0x02),
+        (6, 70, 0x02),
+        (10, 70, 0x02),
+        (15, 70, 0x02),
+        (24, 70, 0x02),
+        (31, 70, 0x02),
+        (41, 70, 0x02),
+        (56, 70, 0x03),
+    ],
+    // 45
+    [
+        (3, 71, 0x02),
+        (6, 71, 0x02),
+        (10, 71, 0x02),
+        (15, 71, 0x02),
+        (24, 71, 0x02),
+        (31, 71, 0x02),
+        (41, 71, 0x02),
+        (56, 71, 0x03),
+        (3, 72, 0x02),
+        (6, 72, 0x02),
+        (10, 72, 0x02),
+        (15, 72, 0x02),
+        (24, 72, 0x02),
+        (31, 72, 0x02),
+        (41, 72, 0x02),
+        (56, 72, 0x03),
+    ],
+    // 46
+    [
+        (2, 73, 0x02),
+        (9, 73, 0x02),
+        (23, 73, 0x02),
+        (40, 73, 0x03),
+        (2, 74, 0x02),
+        (9, 74, 0x02),
+        (23, 74, 0x02),
+        (40, 74, 0x03),
+        (2, 75, 0x02),
+        (9, 75, 0x02),
+        (23, 75, 0x02),
+        (40, 75, 0x03),
+        (2, 76, 0x02),
+        (9, 76, 0x02),
+        (23, 76, 0x02),
+        (40, 76, 0x03),
+    ],
+    // 47
+    [
+        (3, 73, 0x02),
+        (6, 73, 0x02),
+        (10, 73, 0x02),
+        (15, 73, 0x02),
+        (24, 73, 0x02),
+        (31, 73, 0x02),
+        (41, 73, 0x02),
+        (56, 73, 0x03),
+        (3, 74, 0x02),
+        (6, 74, 0x02),
+        (10, 74, 0x02),
+        (15, 74, 0x02),
+        (24, 74, 0x02),
+        (31, 74, 0x02),
+        (41, 74, 0x02),
+        (56, 74, 0x03),
+    ],
+    // 48
+    [
+        (3, 75, 0x02),
+        (6, 75, 0x02),
+        (10, 75, 0x02),
+        (15, 75, 0x02),
+        (24, 75, 0x02),
+        (31, 75, 0x02),
+        (41, 75, 0x02),
+        (56, 75, 0x03),
+        (3, 76, 0x02),
+        (6, 76, 0x02),
+        (10, 76, 0x02),
+        (15, 76, 0x02),
+        (24, 76, 0x02),
+        (31, 76, 0x02),
+        (41, 76, 0x02),
+        (56, 76, 0x03),
+    ],
+    // 49
+    [
+        (1, 77, 0x02),
+        (22, 77, 0x03),
+        (1, 78, 0x02),
+        (22, 78, 0x03),
+        (1, 79, 0x02),
+        (22, 79, 0x03),
+        (1, 80, 0x02),
+        (22, 80, 0x03),
+        (1, 81, 0x02),
+        (22, 81, 0x03),
+        (1, 82, 0x02),
+        (22, 82, 0x03),
+        (1, 83, 0x02),
+        (22, 83, 0x03),
+        (1, 84, 0x02),
+        (22, 84, 0x03),
+    ],
+    // 50
+    [
+        (2, 77, 0x02),
+        (9, 77, 0x02),
+        (23, 77, 0x02),
+        (40, 77, 0x03),
+        (2, 78, 0x02),
+        (9, 78, 0x02),
+        (23, 78, 0x02),
+        (40, 78, 0x03),
+        (2, 79, 0x02),
+        (9, 79, 0x02),
+        (23, 79, 0x02),
+        (40, 79, 0x03),
+        (2, 80, 0x02),
+        (9, 80, 0x02),
+        (23, 80, 0x02),
+        (40, 80, 0x03),
+    ],
+    // 51
+    [
+        (3, 77, 0x02),
+        (6, 77, 0x02),
+        (10, 77, 0x02),
+        (15, 77, 0x02),
+        (24, 77, 0x02),
+        (31, 77, 0x02),
+        (41, 77, 0x02),
+        (56, 77, 0x03),
+        (3, 78, 0x02),
+        (6, 78, 0x02),
+        (10, 78, 0x02),
+        (15, 78, 0x02),
+        (24, 78, 0x02),
+        (31, 78, 0x02),
+        (41, 78, 0x02),
+        (56, 78, 0x03),
+    ],
+    // 52
+    [
+        (3, 79, 0x02),
+        (6, 79, 0x02),
+        (10, 79, 0x02),
+        (15, 79, 0x02),
+        (24, 79, 0x02),
+        (31, 79, 0x02),
+        (41, 79, 0x02),
+        (56, 79, 0x03),
+        (3, 80, 0x02),
+        (6, 80, 0x02),
+        (10, 80, 0x02),
+        (15, 80, 0x02),
+        (24, 80, 0x02),
+        (31, 80, 0x02),
+        (41, 80, 0x02),
+        (56, 80, 0x03),
+    ],
+    // 53
+    [
+        (2, 81, 0x02),
+        (9, 81, 0x02),
+        (23, 81, 0x02),
+        (40, 81, 0x03),
+        (2, 82, 0x02),
+        (9, 82, 0x02),
+        (23, 82, 0x02),
+        (40, 82, 0x03),
+        (2, 83, 0x02),
+        (9, 83, 0x02),
+        (23, 83, 0x02),
+        (40, 83, 0x03),
+        (2, 84, 0x02),
+        (9, 84, 0x02),
+        (23, 84, 0x02),
+        (40, 84, 0x03),
+    ],
+    // 54
+    [
+        (3, 81, 0x02),
+        (6, 81, 0x02),
+        (10, 81, 0x02),
+        (15, 81, 0x02),
+        (24, 81, 0x02),
+        (31, 81, 0x02),
+        (41, 81, 0x02),
+        (56, 81, 0x03),
+        (3, 82, 0x02),
+        (6, 82, 0x02),
+        (10, 82, 0x02),
+        (15, 82, 0x02),
+        (24, 82, 0x02),
+        (31, 82, 0x02),
+        (41, 82, 0x02),
+        (56, 82, 0x03),
+    ],
+    // 55
+    [
+        (3, 83, 0x02),
+        (6, 83, 0x02),
+        (10, 83, 0x02),
+        (15, 83, 0x02),
+        (24, 83, 0x02),
+        (31, 83, 0x02),
+        (41, 83, 0x02),
+        (56, 83, 0x03),
+        (3, 84, 0x02),
+        (6, 84, 0x02),
+        (10, 84, 0x02),
+        (15, 84, 0x02),
+        (24, 84, 0x02),
+        (31, 84, 0x02),
+        (41, 84, 0x02),
+        (56, 84, 0x03),
+    ],
+    // 56
+    [
+        (0, 85, 0x02),
+        (0, 86, 0x02),
+        (0, 87, 0x02),
+        (0, 89, 0x02),
+        (0, 106, 0x02),
+        (0, 107, 0x02),
+        (0, 113, 0x02),
+        (0, 118, 0x02),
+        (0, 119, 0x02),
+        (0, 120, 0x02),
+        (0, 121, 0x02),
+        (0, 122, 0x02),
+        (70, 0, 0x00),
+        (71, 0, 0x00),
+        (73, 0, 0x00),
+        (74, 0, 0x01),
+    ],
+    // 57
+    [
+        (1, 85, 0x02),
+        (22, 85, 0x03),
+        (1, 86, 0x02),
+        (22, 86, 0x03),
+        (1, 87, 0x02),
+        (22, 87, 0x03),
+        (1, 89, 0x02),
+        (22, 89, 0x03),
+        (1, 106, 0x02),
+        (22, 106, 0x03),
+        (1, 107, 0x02),
+        (22, 107, 0x03),
+        (1, 113, 0x02),
+        (22, 113, 0x03),
+        (1, 118, 0x02),
+        (22, 118, 0x03),
+    ],
+    // 58
+    [
+        (2, 85, 0x02),
+        (9, 85, 0x02),
+        (23, 85, 0x02),
+        (40, 85, 0x03),
+        (2, 86, 0x02),
+        (9, 86, 0x02),
+        (23, 86, 0x02),
+        (40, 86, 0x03),
+        (2, 87, 0x02),
+        (9, 87, 0x02),
+        (23, 87, 0x02),
+        (40, 87, 0x03),
+        (2, 89, 0x02),
+        (9, 89, 0x02),
+        (23, 89, 0x02),
+        (40, 89, 0x03),
+    ],
+    // 59
+    [
+        (3, 85, 0x02),
+        (6, 85, 0x02),
+        (10, 85, 0x02),
+        (15, 85, 0x02),
+        (24, 85, 0x02),
+        (31, 85, 0x02),
+        (41, 85, 0x02),
+        (56, 85, 0x03),
+        (3, 86, 0x02),
+        (6, 86, 0x02),
+        (10, 86, 0x02),
+        (15, 86, 0x02),
+        (24, 86, 0x02),
+        (31, 86, 0x02),
+        (41, 86, 0x02),
+        (56, 86, 0x03),
+    ],
+    // 60
+    [
+        (3, 87, 0x02),
+        (6, 87, 0x02),
+        (10, 87, 0x02),
+        (15, 87, 0x02),
+        (24, 87, 0x02),
+        (31, 87, 0x02),
+        (41, 87, 0x02),
+        (56, 87, 0x03),
+        (3, 89, 0x02),
+        (6, 89, 0x02),
+        (10, 89, 0x02),
+        (15, 89, 0x02),
+        (24, 89, 0x02),
+        (31, 89, 0x02),
+        (41, 89, 0x02),
+        (56, 89, 0x03),
+    ],
+    // 61
+    [
+        (2, 106, 0x02),
+        (9, 106, 0x02),
+        (23, 106, 0x02),
+        (40, 106, 0x03),
+        (2, 107, 0x02),
+        (9, 107, 0x02),
+        (23, 107, 0x02),
+        (40, 107, 0x03),
+        (2, 113, 0x02),
+        (9, 113, 0x02),
+        (23, 113, 0x02),
+        (40, 113, 0x03),
+        (2, 118, 0x02),
+        (9, 118, 0x02),
+        (23, 118, 0x02),
+        (40, 118, 0x03),
+    ],
+    // 62
+    [
+        (3, 106, 0x02),
+        (6, 106, 0x02),
+        (10, 106, 0x02),
+        (15, 106, 0x02),
+        (24, 106, 0x02),
+        (31, 106, 0x02),
+        (41, 106, 0x02),
+        (56, 106, 0x03),
+        (3, 107, 0x02),
+        (6, 107, 0x02),
+        (10, 107, 0x02),
+        (15, 107, 0x02),
+        (24, 107, 0x02),
+        (31, 107, 0x02),
+        (41, 107, 0x02),
+        (56, 107, 0x03),
+    ],
+    // 63
+    [
+        (3, 113, 0x02),
+        (6, 113, 0x02),
+        (10, 113, 0x02),
+        (15, 113, 0x02),
+        (24, 113, 0x02),
+        (31, 113, 0x02),
+        (41, 113, 0x02),
+        (56, 113, 0x03),
+        (3, 118, 0x02),
+        (6, 118, 0x02),
+        (10, 118, 0x02),
+        (15, 118, 0x02),
+        (24, 118, 0x02),
+        (31, 118, 0x02),
+        (41, 118, 0x02),
+        (56, 118, 0x03),
+    ],
+    // 64
+    [
+        (1, 119, 0x02),
+        (22, 119, 0x03),
+        (1, 120, 0x02),
+        (22, 120, 0x03),
+        (1, 121, 0x02),
+        (22, 121, 0x03),
+        (1, 122, 0x02),
+        (22, 122, 0x03),
+        (0, 38, 0x02),
+        (0, 42, 0x02),
+        (0, 44, 0x02),
+        (0, 59, 0x02),
+        (0, 88, 0x02),
+        (0, 90, 0x02),
+        (75, 0, 0x00),
+        (78, 0, 0x00),
+    ],
+    // 65
+    [
+        (2, 119, 0x02),
+        (9, 119, 0x02),
+        (23, 119, 0x02),
+        (40, 119, 0x03),
+        (2, 120, 0x02),
+        (9, 120, 0x02),
+        (23, 120, 0x02),
+        (40, 120, 0x03),
+        (2, 121, 0x02),
+        (9, 121, 0x02),
+        (23, 121, 0x02),
+        (40, 121, 0x03),
+        (2, 122, 0x02),
+        (9, 122, 0x02),
+        (23, 122, 0x02),
+        (40, 122, 0x03),
+    ],
+    // 66
+    [
+        (3, 119, 0x02),
+        (6, 119, 0x02),
+        (10, 119, 0x02),
+        (15, 119, 0x02),
+        (24, 119, 0x02),
+        (31, 119, 0x02),
+        (41, 119, 0x02),
+        (56, 119, 0x03),
+        (3, 120, 0x02),
+        (6, 120, 0x02),
+        (10, 120, 0x02),
+        (15, 120, 0x02),
+        (24, 120, 0x02),
+        (31, 120, 0x02),
+        (41, 120, 0x02),
+        (56, 120, 0x03),
+    ],
+    // 67
+    [
+        (3, 121, 0x02),
+        (6, 121, 0x02),
+        (10, 121, 0x02),
+        (15, 121, 0x02),
+        (24, 121, 0x02),
+        (31, 121, 0x02),
+        (41, 121, 0x02),
+        (56, 121, 0x03),
+        (3, 122, 0x02),
+        (6, 122, 0x02),
+        (10, 122, 0x02),
+        (15, 122, 0x02),
+        (24, 122, 0x02),
+        (31, 122, 0x02),
+        (41, 122, 0x02),
+        (56, 122, 0x03),
+    ],
+    // 68
+    [
+        (1, 38, 0x02),
+        (22, 38, 0x03),
+        (1, 42, 0x02),
+        (22, 42, 0x03),
+        (1, 44, 0x02),
+        (22, 44, 0x03),
+        (1, 59, 0x02),
+        (22, 59, 0x03),
+        (1, 88, 0x02),
+        (22, 88, 0x03),
+        (1, 90, 0x02),
+        (22, 90, 0x03),
+        (76, 0, 0x00),
+        (77, 0, 0x00),
+        (79, 0, 0x00),
+        (81, 0, 0x00),
+    ],
+    // 69
+    [
+        (2, 38, 0x02),
+        (9, 38, 0x02),
+        (23, 38, 0x02),
+        (40, 38, 0x03),
+        (2, 42, 0x02),
+        (9, 42, 0x02),
+        (23, 42, 0x02),
+        (40, 42, 0x03),
+        (2, 44, 0x02),
+        (9, 44, 0x02),
+        (23, 44, 0x02),
+        (40, 44, 0x03),
+        (2, 59, 0x02),
+        (9, 59, 0x02),
+        (23, 59, 0x02),
+        (40, 59, 0x03),
+    ],
+    // 70
+    [
+        (3, 38, 0x02),
+        (6, 38, 0x02),
+        (10, 38, 0x02),
+        (15, 38, 0x02),
+        (24, 38, 0x02),
+        (31, 38, 0x02),
+        (41, 38, 0x02),
+        (56, 38, 0x03),
+        (3, 42, 0x02),
+        (6, 42, 0x02),
+        (10, 42, 0x02),
+        (15, 42, 0x02),
+        (24, 42, 0x02),
+        (31, 42, 0x02),
+        (41, 42, 0x02),
+        (56, 42, 0x03),
+    ],
+    // 71
+    [
+        (3, 44, 0x02),
+        (6, 44, 0x02),
+        (10, 44, 0x02),
+        (15, 44, 0x02),
+        (24, 44, 0x02),
+        (31, 44, 0x02),
+        (41, 44, 0x02),
+        (56, 44, 0x03),
+        (3, 59, 0x02),
+        (6, 59, 0x02),
+        (10, 59, 0x02),
+        (15, 59, 0x02),
+        (24, 59, 0x02),
+        (31, 59, 0x02),
+        (41, 59, 0x02),
+        (56, 59, 0x03),
+    ],
+    // 72
+    [
+        (2, 88, 0x02),
+        (9, 88, 0x02),
+        (23, 88, 0x02),
+        (40, 88, 0x03),
+        (2, 90, 0x02),
+        (9, 90, 0x02),
+        (23, 90, 0x02),
+        (40, 90, 0x03),
+        (0, 33, 0x02),
+        (0, 34, 0x02),
+        (0, 40, 0x02),
+        (0, 41, 0x02),
+        (0, 63, 0x02),
+        (80, 0, 0x00),
+        (82, 0, 0x00),
+        (84, 0, 0x00),
+    ],
+    // 73
+    [
+        (3, 88, 0x02),
+        (6, 88, 0x02),
+        (10, 88, 0x02),
+        (15, 88, 0x02),
+        (24, 88, 0x02),
+        (31, 88, 0x02),
+        (41, 88, 0x02),
+        (56, 88, 0x03),
+        (3, 90, 0x02),
+        (6, 90, 0x02),
+        (10, 90, 0x02),
+        (15, 90, 0x02),
+        (24, 90, 0x02),
+        (31, 90, 0x02),
+        (41, 90, 0x02),
+        (56, 90, 0x03),
+    ],
+    // 74
+    [
+        (1, 33, 0x02),
+        (22, 33, 0x03),
+        (1, 34, 0x02),
+        (22, 34, 0x03),
+        (1, 40, 0x02),
+        (22, 40, 0x03),
+        (1, 41, 0x02),
+        (22, 41, 0x03),
+        (1, 63, 0x02),
+        (22, 63, 0x03),
+        (0, 39, 0x02),
+        (0, 43, 0x02),
+        (0, 124, 0x02),
+        (83, 0, 0x00),
+        (85, 0, 0x00),
+        (88, 0, 0x00),
+    ],
+    // 75
+    [
+        (2, 33, 0x02),
+        (9, 33, 0x02),
+        (23, 33, 0x02),
+        (40, 33, 0x03),
+        (2, 34, 0x02),
+        (9, 34, 0x02),
+        (23, 34, 0x02),
+        (40, 34, 0x03),
+        (2, 40, 0x02),
+        (9, 40, 0x02),
+        (23, 40, 0x02),
+        (40, 40, 0x03),
+        (2, 41, 0x02),
+        (9, 41, 0x02),
+        (23, 41, 0x02),
+        (40, 41, 0x03),
+    ],
+    // 76
+    [
+        (3, 33, 0x02),
+        (6, 33, 0x02),
+        (10, 33, 0x02),
+        (15, 33, 0x02),
+        (24, 33, 0x02),
+        (31, 33, 0x02),
+        (41, 33, 0x02),
+        (56, 33, 0x03),
+        (3, 34, 0x02),
+        (6, 34, 0x02),
+        (10, 34, 0x02),
+        (15, 34, 0x02),
+        (24, 34, 0x02),
+        (31, 34, 0x02),
+        (41, 34, 0x02),
+        (56, 34, 0x03),
+    ],
+    // 77
+    [
+        (3, 40, 0x02),
+        (6, 40, 0x02),
+        (10, 40, 0x02),
+        (15, 40, 0x02),
+        (24, 40, 0x02),
+        (31, 40, 0x02),
+        (41, 40, 0x02),
+        (56, 40, 0x03),
+        (3, 41, 0x02),
+        (6, 41, 0x02),
+        (10, 41, 0x02),
+        (15, 41, 0x02),
+        (24, 41, 0x02),
+        (31, 41, 0x02),
+        (41, 41, 0x02),
+        (56, 41, 0x03),
+    ],
+    // 78
+    [
+        (2, 63, 0x02),
+        (9, 63, 0x02),
+        (23, 63, 0x02),
+        (40, 63, 0x03),
+        (1, 39, 0x02),
+        (22, 39, 0x03),
+        (1, 43, 0x02),
+        (22, 43, 0x03),
+        (1, 124, 0x02),
+        (22, 124, 0x03),
+        (0, 35, 0x02),
+        (0, 62, 0x02),
+        (86, 0, 0x00),
+        (87, 0, 0x00),
+        (89, 0, 0x00),
+        (90, 0, 0x00),
+    ],
+    // 79
+    [
+        (3, 63, 0x02),
+        (6, 63, 0x02),
+        (10, 63, 0x02),
+        (15, 63, 0x02),
+        (24, 63, 0x02),
+        (31, 63, 0x02),
+        (41, 63, 0x02),
+        (56, 63, 0x03),
+        (2, 39, 0x02),
+        (9, 39, 0x02),
+        (23, 39, 0x02),
+        (40, 39, 0x03),
+        (2, 43, 0x02),
+        (9, 43, 0x02),
+        (23, 43, 0x02),
+        (40, 43, 0x03),
+    ],
+    // 80
+    [
+        (3, 39, 0x02),
+        (6, 39, 0x02),
+        (10, 39, 0x02),
+        (15, 39, 0x02),
+        (24, 39, 0x02),
+        (31, 39, 0x02),
+        (41, 39, 0x02),
+        (56, 39, 0x03),
+        (3, 43, 0x02),
+        (6, 43, 0x02),
+        (10, 43, 0x02),
+        (15, 43, 0x02),
+        (24, 43, 0x02),
+        (31, 43, 0x02),
+        (41, 43, 0x02),
+        (56, 43, 0x03),
+    ],
+    // 81
+    [
+        (2, 124, 0x02),
+        (9, 124, 0x02),
+        (23, 124, 0x02),
+        (40, 124, 0x03),
+        (1, 35, 0x02),
+        (22, 35, 0x03),
+        (1, 62, 0x02),
+        (22, 62, 0x03),
+        (0, 0, 0x02),
+        (0, 36, 0x02),
+        (0, 64, 0x02),
+        (0, 91, 0x02),
+        (0, 93, 0x02),
+        (0, 126, 0x02),
+        (91, 0, 0x00),
+        (92, 0, 0x00),
+    ],
+    // 82
+    [
+        (3, 124, 0x02),
+        (6, 124, 0x02),
+        (10, 124, 0x02),
+        (15, 124, 0x02),
+        (24, 124, 0x02),
+        (31, 124, 0x02),
+        (41, 124, 0x02),
+        (56, 124, 0x03),
+        (2, 35, 0x02),
+        (9, 35, 0x02),
+        (23, 35, 0x02),
+        (40, 35, 0x03),
+        (2, 62, 0x02),
+        (9, 62, 0x02),
+        (23, 62, 0x02),
+        (40, 62, 0x03),
+    ],
+    // 83
+    [
+        (3, 35, 0x02),
+        (6, 35, 0x02),
+        (10, 35, 0x02),
+        (15, 35, 0x02),
+        (24, 35, 0x02),
+        (31, 35, 0x02),
+        (41, 35, 0x02),
+        (56, 35, 0x03),
+        (3, 62, 0x02),
+        (6, 62, 0x02),
+        (10, 62, 0x02),
+        (15, 62, 0x02),
+        (24, 62, 0x02),
+        (31, 62, 0x02),
+        (41, 62, 0x02),
+        (56, 62, 0x03),
+    ],
+    // 84
+    [
+        (1, 0, 0x02),
+        (22, 0, 0x03),
+        (1, 36, 0x02),
+        (22, 36, 0x03),
+        (1, 64, 0x02),
+        (22, 64, 0x03),
+        (1, 91, 0x02),
+        (22, 91, 0x03),
+        (1, 93, 0x02),
+        (22, 93, 0x03),
+        (1, 126, 0x02),
+        (22, 126, 0x03),
+        (0, 94, 0x02),
+        (0, 125, 0x02),
+        (93, 0, 0x00),
+        (94, 0, 0x00),
+    ],
+    // 85
+    [
+        (2, 0, 0x02),
+        (9, 0, 0x02),
+        (23, 0, 0x02),
+        (40, 0, 0x03),
+        (2, 36, 0x02),
+        (9, 36, 0x02),
+        (23, 36, 0x02),
+        (40, 36, 0x03),
+        (2, 64, 0x02),
+        (9, 64, 0x02),
+        (23, 64, 0x02),
+        (40, 64, 0x03),
+        (2, 91, 0x02),
+        (9, 91, 0x02),
+        (23, 91, 0x02),
+        (40, 91, 0x03),
+    ],
+    // 86
+    [
+        (3, 0, 0x02),
+        (6, 0, 0x02),
+        (10, 0, 0x02),
+        (15, 0, 0x02),
+        (24, 0, 0x02),
+        (31, 0, 0x02),
+        (41, 0, 0x02),
+        (56, 0, 0x03),
+        (3, 36, 0x02),
+        (6, 36, 0x02),
+        (10, 36, 0x02),
+        (15, 36, 0x02),
+        (24, 36, 0x02),
+        (31, 36, 0x02),
+        (41, 36, 0x02),
+        (56, 36, 0x03),
+    ],
+    // 87
+    [
+        (3, 64, 0x02),
+        (6, 64, 0x02),
+        (10, 64, 0x02),
+        (15, 64, 0x02),
+        (24, 64, 0x02),
+        (31, 64, 0x02),
+        (41, 64, 0x02),
+        (56, 64, 0x03),
+        (3, 91, 0x02),
+        (6, 91, 0x02),
+        (10, 91, 0x02),
+        (15, 91, 0x02),
+        (24, 91, 0x02),
+        (31, 91, 0x02),
+        (41, 91, 0x02),
+        (56, 91, 0x03),
+    ],
+    // 88
+    [
+        (2, 93, 0x02),
+        (9, 93, 0x02),
+        (23, 93, 0x02),
+        (40, 93, 0x03),
+        (2, 126, 0x02),
+        (9, 126, 0x02),
+        (23, 126, 0x02),
+        (40, 126, 0x03),
+        (1, 94, 0x02),
+        (22, 94, 0x03),
+        (1, 125, 0x02),
+        (22, 125, 0x03),
+        (0, 60, 0x02),
+        (0, 96, 0x02),
+        (0, 123, 0x02),
+        (95, 0, 0x00),
+    ],
+    // 89
+    [
+        (3, 93, 0x02),
+        (6, 93, 0x02),
+        (10, 93, 0x02),
+        (15, 93, 0x02),
+        (24, 93, 0x02),
+        (31, 93, 0x02),
+        (41, 93, 0x02),
+        (56, 93, 0x03),
+        (3, 126, 0x02),
+        (6, 126, 0x02),
+        (10, 126, 0x02),
+        (15, 126, 0x02),
+        (24, 126, 0x02),
+        (31, 126, 0x02),
+        (41, 126, 0x02),
+        (56, 126, 0x03),
+    ],
+    // 90
+    [
+        (2, 94, 0x02),
+        (9, 94, 0x02),
+        (23, 94, 0x02),
+        (40, 94, 0x03),
+        (2, 125, 0x02),
+        (9, 125, 0x02),
+        (23, 125, 0x02),
+        (40, 125, 0x03),
+        (1, 60, 0x02),
+        (22, 60, 0x03),
+        (1, 96, 0x02),
+        (22, 96, 0x03),
+        (1, 123, 0x02),
+        (22, 123, 0x03),
+        (96, 0, 0x00),
+        (110, 0, 0x00),
+    ],
+    // 91
+    [
+        (3, 94, 0x02),
+        (6, 94, 0x02),
+        (10, 94, 0x02),
+        (15, 94, 0x02),
+        (24, 94, 0x02),
+        (31, 94, 0x02),
+        (41, 94, 0x02),
+        (56, 94, 0x03),
+        (3, 125, 0x02),
+        (6, 125, 0x02),
+        (10, 125, 0x02),
+        (15, 125, 0x02),
+        (24, 125, 0x02),
+        (31, 125, 0x02),
+        (41, 125, 0x02),
+        (56, 125, 0x03),
+    ],
+    // 92
+    [
+        (2, 60, 0x02),
+        (9, 60, 0x02),
+        (23, 60, 0x02),
+        (40, 60, 0x03),
+        (2, 96, 0x02),
+        (9, 96, 0x02),
+        (23, 96, 0x02),
+        (40, 96, 0x03),
+        (2, 123, 0x02),
+        (9, 123, 0x02),
+        (23, 123, 0x02),
+        (40, 123, 0x03),
+        (97, 0, 0x00),
+        (101, 0, 0x00),
+        (111, 0, 0x00),
+        (133, 0, 0x00),
+    ],
+    // 93
+    [
+        (3, 60, 0x02),
+        (6, 60, 0x02),
+        (10, 60, 0x02),
+        (15, 60, 0x02),
+        (24, 60, 0x02),
+        (31, 60, 0x02),
+        (41, 60, 0x02),
+        (56, 60, 0x03),
+        (3, 96, 0x02),
+        (6, 96, 0x02),
+        (10, 96, 0x02),
+        (15, 96, 0x02),
+        (24, 96, 0x02),
+        (31, 96, 0x02),
+        (41, 96, 0x02),
+        (56, 96, 0x03),
+    ],
+    // 94
+    [
+        (3, 123, 0x02),
+        (6, 123, 0x02),
+        (10, 123, 0x02),
+        (15, 123, 0x02),
+        (24, 123, 0x02),
+        (31, 123, 0x02),
+        (41, 123, 0x02),
+        (56, 123, 0x03),
+        (98, 0, 0x00),
+        (99, 0, 0x00),
+        (102, 0, 0x00),
+        (105, 0, 0x00),
+        (112, 0, 0x00),
+        (119, 0, 0x00),
+        (134, 0, 0x00),
+        (153, 0, 0x00),
+    ],
+    // 95
+    [
+        (0, 92, 0x02),
+        (0, 195, 0x02),
+        (0, 208, 0x02),
+        (100, 0, 0x00),
+        (103, 0, 0x00),
+        (104, 0, 0x00),
+        (106, 0, 0x00),
+        (107, 0, 0x00),
+        (113, 0, 0x00),
+        (116, 0, 0x00),
+        (120, 0, 0x00),
+        (126, 0, 0x00),
+        (135, 0, 0x00),
+        (142, 0, 0x00),
+        (154, 0, 0x00),
+        (169, 0, 0x00),
+    ],
+    // 96
+    [
+        (1, 92, 0x02),
+        (22, 92, 0x03),
+        (1, 195, 0x02),
+        (22, 195, 0x03),
+        (1, 208, 0x02),
+        (22, 208, 0x03),
+        (0, 128, 0x02),
+        (0, 130, 0x02),
+        (0, 131, 0x02),
+        (0, 162, 0x02),
+        (0, 184, 0x02),
+        (0, 194, 0x02),
+        (0, 224, 0x02),
+        (0, 226, 0x02),
+        (108, 0, 0x00),
+        (109, 0, 0x00),
+    ],
+    // 97
+    [
+        (2, 92, 0x02),
+        (9, 92, 0x02),
+        (23, 92, 0x02),
+        (40, 92, 0x03),
+        (2, 195, 0x02),
+        (9, 195, 0x02),
+        (23, 195, 0x02),
+        (40, 195, 0x03),
+        (2, 208, 0x02),
+        (9, 208, 0x02),
+        (23, 208, 0x02),
+        (40, 208, 0x03),
+        (1, 128, 0x02),
+        (22, 128, 0x03),
+        (1, 130, 0x02),
+        (22, 130, 0x03),
+    ],
+    // 98
+    [
+        (3, 92, 0x02),
+        (6, 92, 0x02),
+        (10, 92, 0x02),
+        (15, 92, 0x02),
+        (24, 92, 0x02),
+        (31, 92, 0x02),
+        (41, 92, 0x02),
+        (56, 92, 0x03),
+        (3, 195, 0x02),
+        (6, 195, 0x02),
+        (10, 195, 0x02),
+        (15, 195, 0x02),
+        (24, 195, 0x02),
+        (31, 195, 0x02),
+        (41, 195, 0x02),
+        (56, 195, 0x03),
+    ],
+    // 99
+    [
+        (3, 208, 0x02),
+        (6, 208, 0x02),
+        (10, 208, 0x02),
+        (15, 208, 0x02),
+        (24, 208, 0x02),
+        (31, 208, 0x02),
+        (41, 208, 0x02),
+        (56, 208, 0x03),
+        (2, 128, 0x02),
+        (9, 128, 0x02),
+        (23, 128, 0x02),
+        (40, 128, 0x03),
+        (2, 130, 0x02),
+        (9, 130, 0x02),
+        (23, 130, 0x02),
+        (40, 130, 0x03),
+    ],
+    // 100
+    [
+        (3, 128, 0x02),
+        (6, 128, 0x02),
+        (10, 128, 0x02),
+        (15, 128, 0x02),
+        (24, 128, 0x02),
+        (31, 128, 0x02),
+        (41, 128, 0x02),
+        (56, 128, 0x03),
+        (3, 130, 0x02),
+        (6, 130, 0x02),
+        (10, 130, 0x02),
+        (15, 130, 0x02),
+        (24, 130, 0x02),
+        (31, 130, 0x02),
+        (41, 130, 0x02),
+        (56, 130, 0x03),
+    ],
+    // 101
+    [
+        (1, 131, 0x02),
+        (22, 131, 0x03),
+        (1, 162, 0x02),
+        (22, 162, 0x03),
+        (1, 184, 0x02),
+        (22, 184, 0x03),
+        (1, 194, 0x02),
+        (22, 194, 0x03),
+        (1, 224, 0x02),
+        (22, 224, 0x03),
+        (1, 226, 0x02),
+        (22, 226, 0x03),
+        (0, 153, 0x02),
+        (0, 161, 0x02),
+        (0, 167, 0x02),
+        (0, 172, 0x02),
+    ],
+    // 102
+    [
+        (2, 131, 0x02),
+        (9, 131, 0x02),
+        (23, 131, 0x02),
+        (40, 131, 0x03),
+        (2, 162, 0x02),
+        (9, 162, 0x02),
+        (23, 162, 0x02),
+        (40, 162, 0x03),
+        (2, 184, 0x02),
+        (9, 184, 0x02),
+        (23, 184, 0x02),
+        (40, 184, 0x03),
+        (2, 194, 0x02),
+        (9, 194, 0x02),
+        (23, 194, 0x02),
+        (40, 194, 0x03),
+    ],
+    // 103
+    [
+        (3, 131, 0x02),
+        (6, 131, 0x02),
+        (10, 131, 0x02),
+        (15, 131, 0x02),
+        (24, 131, 0x02),
+        (31, 131, 0x02),
+        (41, 131, 0x02),
+        (56, 131, 0x03),
+        (3, 162, 0x02),
+        (6, 162, 0x02),
+        (10, 162, 0x02),
+        (15, 162, 0x02),
+        (24, 162, 0x02),
+        (31, 162, 0x02),
+        (41, 162, 0x02),
+        (56, 162, 0x03),
+    ],
+    // 104
+    [
+        (3, 184, 0x02),
+        (6, 184, 0x02),
+        (10, 184, 0x02),
+        (15, 184, 0x02),
+        (24, 184, 0x02),
+        (31, 184, 0x02),
+        (41, 184, 0x02),
+        (56, 184, 0x03),
+        (3, 194, 0x02),
+        (6, 194, 0x02),
+        (10, 194, 0x02),
+        (15, 194, 0x02),
+        (24, 194, 0x02),
+        (31, 194, 0x02),
+        (41, 194, 0x02),
+        (56, 194, 0x03),
+    ],
+    // 105
+    [
+        (2, 224, 0x02),
+        (9, 224, 0x02),
+        (23, 224, 0x02),
+        (40, 224, 0x03),
+        (2, 226, 0x02),
+        (9, 226, 0x02),
+        (23, 226, 0x02),
+        (40, 226, 0x03),
+        (1, 153, 0x02),
+        (22, 153, 0x03),
+        (1, 161, 0x02),
+        (22, 161, 0x03),
+        (1, 167, 0x02),
+        (22, 167, 0x03),
+        (1, 172, 0x02),
+        (22, 172, 0x03),
+    ],
+    // 106
+    [
+        (3, 224, 0x02),
+        (6, 224, 0x02),
+        (10, 224, 0x02),
+        (15, 224, 0x02),
+        (24, 224, 0x02),
+        (31, 224, 0x02),
+        (41, 224, 0x02),
+        (56, 224, 0x03),
+        (3, 226, 0x02),
+        (6, 226, 0x02),
+        (10, 226, 0x02),
+        (15, 226, 0x02),
+        (24, 226, 0x02),
+        (31, 226, 0x02),
+        (41, 226, 0x02),
+        (56, 226, 0x03),
+    ],
+    // 107
+    [
+        (2, 153, 0x02),
+        (9, 153, 0x02),
+        (23, 153, 0x02),
+        (40, 153, 0x03),
+        (2, 161, 0x02),
+        (9, 161, 0x02),
+        (23, 161, 0x02),
+        (40, 161, 0x03),
+        (2, 167, 0x02),
+        (9, 167, 0x02),
+        (23, 167, 0x02),
+        (40, 167, 0x03),
+        (2, 172, 0x02),
+        (9, 172, 0x02),
+        (23, 172, 0x02),
+        (40, 172, 0x03),
+    ],
+    // 108
+    [
+        (3, 153, 0x02),
+        (6, 153, 0x02),
+        (10, 153, 0x02),
+        (15, 153, 0x02),
+        (24, 153, 0x02),
+        (31, 153, 0x02),
+        (41, 153, 0x02),
+        (56, 153, 0x03),
+        (3, 161, 0x02),
+        (6, 161, 0x02),
+        (10, 161, 0x02),
+        (15, 161, 0x02),
+        (24, 161, 0x02),
+        (31, 161, 0x02),
+        (41, 161, 0x02),
+        (56, 161, 0x03),
+    ],
+    // 109
+    [
+        (3, 167, 0x02),
+        (6, 167, 0x02),
+        (10, 167, 0x02),
+        (15, 167, 0x02),
+        (24, 167, 0x02),
+        (31, 167, 0x02),
+        (41, 167, 0x02),
+        (56, 167, 0x03),
+        (3, 172, 0x02),
+        (6, 172, 0x02),
+        (10, 172, 0x02),
+        (15, 172, 0x02),
+        (24, 172, 0x02),
+        (31, 172, 0x02),
+        (41, 172, 0x02),
+        (56, 172, 0x03),
+    ],
+    // 110
+    [
+        (114, 0, 0x00),
+        (115, 0, 0x00),
+        (117, 0, 0x00),
+        (118, 0, 0x00),
+        (121, 0, 0x00),
+        (123, 0, 0x00),
+        (127, 0, 0x00),
+        (130, 0, 0x00),
+        (136, 0, 0x00),
+        (139, 0, 0x00),
+        (143, 0, 0x00),
+        (146, 0, 0x00),
+        (155, 0, 0x00),
+        (162, 0, 0x00),
+        (170, 0, 0x00),
+        (180, 0, 0x00),
+    ],
+    // 111
+    [
+        (0, 176, 0x02),
+        (0, 177, 0x02),
+        (0, 179, 0x02),
+        (0, 209, 0x02),
+        (0, 216, 0x02),
+        (0, 217, 0x02),
+        (0, 227, 0x02),
+        (0, 229, 0x02),
+        (0, 230, 0x02),
+        (122, 0, 0x00),
+        (124, 0, 0x00),
+        (125, 0, 0x00),
+        (128, 0, 0x00),
+        (129, 0, 0x00),
+        (131, 0, 0x00),
+        (132, 0, 0x00),
+    ],
+    // 112
+    [
+        (1, 176, 0x02),
+        (22, 176, 0x03),
+        (1, 177, 0x02),
+        (22, 177, 0x03),
+        (1, 179, 0x02),
+        (22, 179, 0x03),
+        (1, 209, 0x02),
+        (22, 209, 0x03),
+        (1, 216, 0x02),
+        (22, 216, 0x03),
+        (1, 217, 0x02),
+        (22, 217, 0x03),
+        (1, 227, 0x02),
+        (22, 227, 0x03),
+        (1, 229, 0x02),
+        (22, 229, 0x03),
+    ],
+    // 113
+    [
+        (2, 176, 0x02),
+        (9, 176, 0x02),
+        (23, 176, 0x02),
+        (40, 176, 0x03),
+        (2, 177, 0x02),
+        (9, 177, 0x02),
+        (23, 177, 0x02),
+        (40, 177, 0x03),
+        (2, 179, 0x02),
+        (9, 179, 0x02),
+        (23, 179, 0x02),
+        (40, 179, 0x03),
+        (2, 209, 0x02),
+        (9, 209, 0x02),
+        (23, 209, 0x02),
+        (40, 209, 0x03),
+    ],
+    // 114
+    [
+        (3, 176, 0x02),
+        (6, 176, 0x02),
+        (10, 176, 0x02),
+        (15, 176, 0x02),
+        (24, 176, 0x02),
+        (31, 176, 0x02),
+        (41, 176, 0x02),
+        (56, 176, 0x03),
+        (3, 177, 0x02),
+        (6, 177, 0x02),
+        (10, 177, 0x02),
+        (15, 177, 0x02),
+        (24, 177, 0x02),
+        (31, 177, 0x02),
+        (41, 177, 0x02),
+        (56, 177, 0x03),
+    ],
+    // 115
+    [
+        (3, 179, 0x02),
+        (6, 179, 0x02),
+        (10, 179, 0x02),
+        (15, 179, 0x02),
+        (24, 179, 0x02),
+        (31, 179, 0x02),
+        (41, 179, 0x02),
+        (56, 179, 0x03),
+        (3, 209, 0x02),
+        (6, 209, 0x02),
+        (10, 209, 0x02),
+        (15, 209, 0x02),
+        (24, 209, 0x02),
+        (31, 209, 0x02),
+        (41, 209, 0x02),
+        (56, 209, 0x03),
+    ],
+    // 116
+    [
+        (2, 216, 0x02),
+        (9, 216, 0x02),
+        (23, 216, 0x02),
+        (40, 216, 0x03),
+        (2, 217, 0x02),
+        (9, 217, 0x02),
+        (23, 217, 0x02),
+        (40, 217, 0x03),
+        (2, 227, 0x02),
+        (9, 227, 0x02),
+        (23, 227, 0x02),
+        (40, 227, 0x03),
+        (2, 229, 0x02),
+        (9, 229, 0x02),
+        (23, 229, 0x02),
+        (40, 229, 0x03),
+    ],
+    // 117
+    [
+        (3, 216, 0x02),
+        (6, 216, 0x02),
+        (10, 216, 0x02),
+        (15, 216, 0x02),
+        (24, 216, 0x02),
+        (31, 216, 0x02),
+        (41, 216, 0x02),
+        (56, 216, 0x03),
+        (3, 217, 0x02),
+        (6, 217, 0x02),
+        (10, 217, 0x02),
+        (15, 217, 0x02),
+        (24, 217, 0x02),
+        (31, 217, 0x02),
+        (41, 217, 0x02),
+        (56, 217, 0x03),
+    ],
+    // 118
+    [
+        (3, 227, 0x02),
+        (6, 227, 0x02),
+        (10, 227, 0x02),
+        (15, 227, 0x02),
+        (24, 227, 0x02),
+        (31, 227, 0x02),
+        (41, 227, 0x02),
+        (56, 227, 0x03),
+        (3, 229, 0x02),
+        (6, 229, 0x02),
+        (10, 229, 0x02),
+        (15, 229, 0x02),
+        (24, 229, 0x02),
+        (31, 229, 0x02),
+        (41, 229, 0x02),
+        (56, 229, 0x03),
+    ],
+    // 119
+    [
+        (1, 230, 0x02),
+        (22, 230, 0x03),
+        (0, 129, 0x02),
+        (0, 132, 0x02),
+        (0, 133, 0x02),
+        (0, 134, 0x02),
+        (0, 136, 0x02),
+        (0, 146, 0x02),
+        (0, 154, 0x02),
+        (0, 156, 0x02),
+        (0, 160, 0x02),
+        (0, 163, 0x02),
+        (0, 164, 0x02),
+        (0, 169, 0x02),
+        (0, 170, 0x02),
+        (0, 173, 0x02),
+    ],
+    // 120
+    [
+        (2, 230, 0x02),
+        (9, 230, 0x02),
+        (23, 230, 0x02),
+        (40, 230, 0x03),
+        (1, 129, 0x02),
+        (22, 129, 0x03),
+        (1, 132, 0x02),
+        (22, 132, 0x03),
+        (1, 133, 0x02),
+        (22, 133, 0x03),
+        (1, 134, 0x02),
+        (22, 134, 0x03),
+        (1, 136, 0x02),
+        (22, 136, 0x03),
+        (1, 146, 0x02),
+        (22, 146, 0x03),
+    ],
+    // 121
+    [
+        (3, 230, 0x02),
+        (6, 230, 0x02),
+        (10, 230, 0x02),
+        (15, 230, 0x02),
+        (24, 230, 0x02),
+        (31, 230, 0x02),
+        (41, 230, 0x02),
+        (56, 230, 0x03),
+        (2, 129, 0x02),
+        (9, 129, 0x02),
+        (23, 129, 0x02),
+        (40, 129, 0x03),
+        (2, 132, 0x02),
+        (9, 132, 0x02),
+        (23, 132, 0x02),
+        (40, 132, 0x03),
+    ],
+    // 122
+    [
+        (3, 129, 0x02),
+        (6, 129, 0x02),
+        (10, 129, 0x02),
+        (15, 129, 0x02),
+        (24, 129, 0x02),
+        (31, 129, 0x02),
+        (41, 129, 0x02),
+        (56, 129, 0x03),
+        (3, 132, 0x02),
+        (6, 132, 0x02),
+        (10, 132, 0x02),
+        (15, 132, 0x02),
+        (24, 132, 0x02),
+        (31, 132, 0x02),
+        (41, 132, 0x02),
+        (56, 132, 0x03),
+    ],
+    // 123
+    [
+        (2, 133, 0x02),
+        (9, 133, 0x02),
+        (23, 133, 0x02),
+        (40, 133, 0x03),
+        (2, 134, 0x02),
+        (9, 134, 0x02),
+        (23, 134, 0x02),
+        (40, 134, 0x03),
+        (2, 136, 0x02),
+        (9, 136, 0x02),
+        (23, 136, 0x02),
+        (40, 136, 0x03),
+        (2, 146, 0x02),
+        (9, 146, 0x02),
+        (23, 146, 0x02),
+        (40, 146, 0x03),
+    ],
+    // 124
+    [
+        (3, 133, 0x02),
+        (6, 133, 0x02),
+        (10, 133, 0x02),
+        (15, 133, 0x02),
+        (24, 133, 0x02),
+        (31, 133, 0x02),
+        (41, 133, 0x02),
+        (56, 133, 0x03),
+        (3, 134, 0x02),
+        (6, 134, 0x02),
+        (10, 134, 0x02),
+        (15, 134, 0x02),
+        (24, 134, 0x02),
+        (31, 134, 0x02),
+        (41, 134, 0x02),
+        (56, 134, 0x03),
+    ],
+    // 125
+    [
+        (3, 136, 0x02),
+        (6, 136, 0x02),
+        (10, 136, 0x02),
+        (15, 136, 0x02),
+        (24, 136, 0x02),
+        (31, 136, 0x02),
+        (41, 136, 0x02),
+        (56, 136, 0x03),
+        (3, 146, 0x02),
+        (6, 146, 0x02),
+        (10, 146, 0x02),
+        (15, 146, 0x02),
+        (24, 146, 0x02),
+        (31, 146, 0x02),
+        (41, 146, 0x02),
+        (56, 146, 0x03),
+    ],
+    // 126
+    [
+        (1, 154, 0x02),
+        (22, 154, 0x03),
+        (1, 156, 0x02),
+        (22, 156, 0x03),
+        (1, 160, 0x02),
+        (22, 160, 0x03),
+        (1, 163, 0x02),
+        (22, 163, 0x03),
+        (1, 164, 0x02),
+        (22, 164, 0x03),
+        (1, 169, 0x02),
+        (22, 169, 0x03),
+        (1, 170, 0x02),
+        (22, 170, 0x03),
+        (1, 173, 0x02),
+        (22, 173, 0x03),
+    ],
+    // 127
+    [
+        (2, 154, 0x02),
+        (9, 154, 0x02),
+        (23, 154, 0x02),
+        (40, 154, 0x03),
+        (2, 156, 0x02),
+        (9, 156, 0x02),
+        (23, 156, 0x02),
+        (40, 156, 0x03),
+        (2, 160, 0x02),
+        (9, 160, 0x02),
+        (23, 160, 0x02),
+        (40, 160, 0x03),
+        (2, 163, 0x02),
+        (9, 163, 0x02),
+        (23, 163, 0x02),
+        (40, 163, 0x03),
+    ],
+    // 128
+    [
+        (3, 154, 0x02),
+        (6, 154, 0x02),
+        (10, 154, 0x02),
+        (15, 154, 0x02),
+        (24, 154, 0x02),
+        (31, 154, 0x02),
+        (41, 154, 0x02),
+        (56, 154, 0x03),
+        (3, 156, 0x02),
+        (6, 156, 0x02),
+        (10, 156, 0x02),
+        (15, 156, 0x02),
+        (24, 156, 0x02),
+        (31, 156, 0x02),
+        (41, 156, 0x02),
+        (56, 156, 0x03),
+    ],
+    // 129
+    [
+        (3, 160, 0x02),
+        (6, 160, 0x02),
+        (10, 160, 0x02),
+        (15, 160, 0x02),
+        (24, 160, 0x02),
+        (31, 160, 0x02),
+        (41, 160, 0x02),
+        (56, 160, 0x03),
+        (3, 163, 0x02),
+        (6, 163, 0x02),
+        (10, 163, 0x02),
+        (15, 163, 0x02),
+        (24, 163, 0x02),
+        (31, 163, 0x02),
+        (41, 163, 0x02),
+        (56, 163, 0x03),
+    ],
+    // 130
+    [
+        (2, 164, 0x02),
+        (9, 164, 0x02),
+        (23, 164, 0x02),
+        (40, 164, 0x03),
+        (2, 169, 0x02),
+        (9, 169, 0x02),
+        (23, 169, 0x02),
+        (40, 169, 0x03),
+        (2, 170, 0x02),
+        (9, 170, 0x02),
+        (23, 170, 0x02),
+        (40, 170, 0x03),
+        (2, 173, 0x02),
+        (9, 173, 0x02),
+        (23, 173, 0x02),
+        (40, 173, 0x03),
+    ],
+    // 131
+    [
+        (3, 164, 0x02),
+        (6, 164, 0x02),
+        (10, 164, 0x02),
+        (15, 164, 0x02),
+        (24, 164, 0x02),
+        (31, 164, 0x02),
+        (41, 164, 0x02),
+        (56, 164, 0x03),
+        (3, 169, 0x02),
+        (6, 169, 0x02),
+        (10, 169, 0x02),
+        (15, 169, 0x02),
+        (24, 169, 0x02),
+        (31, 169, 0x02),
+        (41, 169, 0x02),
+        (56, 169, 0x03),
+    ],
+    // 132
+    [
+        (3, 170, 0x02),
+        (6, 170, 0x02),
+        (10, 170, 0x02),
+        (15, 170, 0x02),
+        (24, 170, 0x02),
+        (31, 170, 0x02),
+        (41, 170, 0x02),
+        (56, 170, 0x03),
+        (3, 173, 0x02),
+        (6, 173, 0x02),
+        (10, 173, 0x02),
+        (15, 173, 0x02),
+        (24, 173, 0x02),
+        (31, 173, 0x02),
+        (41, 173, 0x02),
+        (56, 173, 0x03),
+    ],
+    // 133
+    [
+        (137, 0, 0x00),
+        (138, 0, 0x00),
+        (140, 0, 0x00),
+        (141, 0, 0x00),
+        (144, 0, 0x00),
+        (145, 0, 0x00),
+        (147, 0, 0x00),
+        (150, 0, 0x00),
+        (156, 0, 0x00),
+        (159, 0, 0x00),
+        (163, 0, 0x00),
+        (166, 0, 0x00),
+        (171, 0, 0x00),
+        (174, 0, 0x00),
+        (181, 0, 0x00),
+        (190, 0, 0x00),
+    ],
+    // 134
+    [
+        (0, 178, 0x02),
+        (0, 181, 0x02),
+        (0, 185, 0x02),
+        (0, 186, 0x02),
+        (0, 187, 0x02),
+        (0, 189, 0x02),
+        (0, 190, 0x02),
+        (0, 196, 0x02),
+        (0, 198, 0x02),
+        (0, 228, 0x02),
+        (0, 232, 0x02),
+        (0, 233, 0x02),
+        (148, 0, 0x00),
+        (149, 0, 0x00),
+        (151, 0, 0x00),
+        (152, 0, 0x00),
+    ],
+    // 135
+    [
+        (1, 178, 0x02),
+        (22, 178, 0x03),
+        (1, 181, 0x02),
+        (22, 181, 0x03),
+        (1, 185, 0x02),
+        (22, 185, 0x03),
+        (1, 186, 0x02),
+        (22, 186, 0x03),
+        (1, 187, 0x02),
+        (22, 187, 0x03),
+        (1, 189, 0x02),
+        (22, 189, 0x03),
+        (1, 190, 0x02),
+        (22, 190, 0x03),
+        (1, 196, 0x02),
+        (22, 196, 0x03),
+    ],
+    // 136
+    [
+        (2, 178, 0x02),
+        (9, 178, 0x02),
+        (23, 178, 0x02),
+        (40, 178, 0x03),
+        (2, 181, 0x02),
+        (9, 181, 0x02),
+        (23, 181, 0x02),
+        (40, 181, 0x03),
+        (2, 185, 0x02),
+        (9, 185, 0x02),
+        (23, 185, 0x02),
+        (40, 185, 0x03),
+        (2, 186, 0x02),
+        (9, 186, 0x02),
+        (23, 186, 0x02),
+        (40, 186, 0x03),
+    ],
+    // 137
+    [
+        (3, 178, 0x02),
+        (6, 178, 0x02),
+        (10, 178, 0x02),
+        (15, 178, 0x02),
+        (24, 178, 0x02),
+        (31, 178, 0x02),
+        (41, 178, 0x02),
+        (56, 178, 0x03),
+        (3, 181, 0x02),
+        (6, 181, 0x02),
+        (10, 181, 0x02),
+        (15, 181, 0x02),
+        (24, 181, 0x02),
+        (31, 181, 0x02),
+        (41, 181, 0x02),
+        (56, 181, 0x03),
+    ],
+    // 138
+    [
+        (3, 185, 0x02),
+        (6, 185, 0x02),
+        (10, 185, 0x02),
+        (15, 185, 0x02),
+        (24, 185, 0x02),
+        (31, 185, 0x02),
+        (41, 185, 0x02),
+        (56, 185, 0x03),
+        (3, 186, 0x02),
+        (6, 186, 0x02),
+        (10, 186, 0x02),
+        (15, 186, 0x02),
+        (24, 186, 0x02),
+        (31, 186, 0x02),
+        (41, 186, 0x02),
+        (56, 186, 0x03),
+    ],
+    // 139
+    [
+        (2, 187, 0x02),
+        (9, 187, 0x02),
+        (23, 187, 0x02),
+        (40, 187, 0x03),
+        (2, 189, 0x02),
+        (9, 189, 0x02),
+        (23, 189, 0x02),
+        (40, 189, 0x03),
+        (2, 190, 0x02),
+        (9, 190, 0x02),
+        (23, 190, 0x02),
+        (40, 190, 0x03),
+        (2, 196, 0x02),
+        (9, 196, 0x02),
+        (23, 196, 0x02),
+        (40, 196, 0x03),
+    ],
+    // 140
+    [
+        (3, 187, 0x02),
+        (6, 187, 0x02),
+        (10, 187, 0x02),
+        (15, 187, 0x02),
+        (24, 187, 0x02),
+        (31, 187, 0x02),
+        (41, 187, 0x02),
+        (56, 187, 0x03),
+        (3, 189, 0x02),
+        (6, 189, 0x02),
+        (10, 189, 0x02),
+        (15, 189, 0x02),
+        (24, 189, 0x02),
+        (31, 189, 0x02),
+        (41, 189, 0x02),
+        (56, 189, 0x03),
+    ],
+    // 141
+    [
+        (3, 190, 0x02),
+        (6, 190, 0x02),
+        (10, 190, 0x02),
+        (15, 190, 0x02),
+        (24, 190, 0x02),
+        (31, 190, 0x02),
+        (41, 190, 0x02),
+        (56, 190, 0x03),
+        (3, 196, 0x02),
+        (6, 196, 0x02),
+        (10, 196, 0x02),
+        (15, 196, 0x02),
+        (24, 196, 0x02),
+        (31, 196, 0x02),
+        (41, 196, 0x02),
+        (56, 196, 0x03),
+    ],
+    // 142
+    [
+        (1, 198, 0x02),
+        (22, 198, 0x03),
+        (1, 228, 0x02),
+        (22, 228, 0x03),
+        (1, 232, 0x02),
+        (22, 232, 0x03),
+        (1, 233, 0x02),
+        (22, 233, 0x03),
+        (0, 1, 0x02),
+        (0, 135, 0x02),
+        (0, 137, 0x02),
+        (0, 138, 0x02),
+        (0, 139, 0x02),
+        (0, 140, 0x02),
+        (0, 141, 0x02),
+        (0, 143, 0x02),
+    ],
+    // 143
+    [
+        (2, 198, 0x02),
+        (9, 198, 0x02),
+        (23, 198, 0x02),
+        (40, 198, 0x03),
+        (2, 228, 0x02),
+        (9, 228, 0x02),
+        (23, 228, 0x02),
+        (40, 228, 0x03),
+        (2, 232, 0x02),
+        (9, 232, 0x02),
+        (23, 232, 0x02),
+        (40, 232, 0x03),
+        (2, 233, 0x02),
+        (9, 233, 0x02),
+        (23, 233, 0x02),
+        (40, 233, 0x03),
+    ],
+    // 144
+    [
+        (3, 198, 0x02),
+        (6, 198, 0x02),
+        (10, 198, 0x02),
+        (15, 198, 0x02),
+        (24, 198, 0x02),
+        (31, 198, 0x02),
+        (41, 198, 0x02),
+        (56, 198, 0x03),
+        (3, 228, 0x02),
+        (6, 228, 0x02),
+        (10, 228, 0x02),
+        (15, 228, 0x02),
+        (24, 228, 0x02),
+        (31, 228, 0x02),
+        (41, 228, 0x02),
+        (56, 228, 0x03),
+    ],
+    // 145
+    [
+        (3, 232, 0x02),
+        (6, 232, 0x02),
+        (10, 232, 0x02),
+        (15, 232, 0x02),
+        (24, 232, 0x02),
+        (31, 232, 0x02),
+        (41, 232, 0x02),
+        (56, 232, 0x03),
+        (3, 233, 0x02),
+        (6, 233, 0x02),
+        (10, 233, 0x02),
+        (15, 233, 0x02),
+        (24, 233, 0x02),
+        (31, 233, 0x02),
+        (41, 233, 0x02),
+        (56, 233, 0x03),
+    ],
+    // 146
+    [
+        (1, 1, 0x02),
+        (22, 1, 0x03),
+        (1, 135, 0x02),
+        (22, 135, 0x03),
+        (1, 137, 0x02),
+        (22, 137, 0x03),
+        (1, 138, 0x02),
+        (22, 138, 0x03),
+        (1, 139, 0x02),
+        (22, 139, 0x03),
+        (1, 140, 0x02),
+        (22, 140, 0x03),
+        (1, 141, 0x02),
+        (22, 141, 0x03),
+        (1, 143, 0x02),
+        (22, 143, 0x03),
+    ],
+    // 147
+    [
+        (2, 1, 0x02),
+        (9, 1, 0x02),
+        (23, 1, 0x02),
+        (40, 1, 0x03),
+        (2, 135, 0x02),
+        (9, 135, 0x02),
+        (23, 135, 0x02),
+        (40, 135, 0x03),
+        (2, 137, 0x02),
+        (9, 137, 0x02),
+        (23, 137, 0x02),
+        (40, 137, 0x03),
+        (2, 138, 0x02),
+        (9, 138, 0x02),
+        (23, 138, 0x02),
+        (40, 138, 0x03),
+    ],
+    // 148
+    [
+        (3, 1, 0x02),
+        (6, 1, 0x02),
+        (10, 1, 0x02),
+        (15, 1, 0x02),
+        (24, 1, 0x02),
+        (31, 1, 0x02),
+        (41, 1, 0x02),
+        (56, 1, 0x03),
+        (3, 135, 0x02),
+        (6, 135, 0x02),
+        (10, 135, 0x02),
+        (15, 135, 0x02),
+        (24, 135, 0x02),
+        (31, 135, 0x02),
+        (41, 135, 0x02),
+        (56, 135, 0x03),
+    ],
+    // 149
+    [
+        (3, 137, 0x02),
+        (6, 137, 0x02),
+        (10, 137, 0x02),
+        (15, 137, 0x02),
+        (24, 137, 0x02),
+        (31, 137, 0x02),
+        (41, 137, 0x02),
+        (56, 137, 0x03),
+        (3, 138, 0x02),
+        (6, 138, 0x02),
+        (10, 138, 0x02),
+        (15, 138, 0x02),
+        (24, 138, 0x02),
+        (31, 138, 0x02),
+        (41, 138, 0x02),
+        (56, 138, 0x03),
+    ],
+    // 150
+    [
+        (2, 139, 0x02),
+        (9, 139, 0x02),
+        (23, 139, 0x02),
+        (40, 139, 0x03),
+        (2, 140, 0x02),
+        (9, 140, 0x02),
+        (23, 140, 0x02),
+        (40, 140, 0x03),
+        (2, 141, 0x02),
+        (9, 141, 0x02),
+        (23, 141, 0x02),
+        (40, 141, 0x03),
+        (2, 143, 0x02),
+        (9, 143, 0x02),
+        (23, 143, 0x02),
+        (40, 143, 0x03),
+    ],
+    // 151
+    [
+        (3, 139, 0x02),
+        (6, 139, 0x02),
+        (10, 139, 0x02),
+        (15, 139, 0x02),
+        (24, 139, 0x02),
+        (31, 139, 0x02),
+        (41, 139, 0x02),
+        (56, 139, 0x03),
+        (3, 140, 0x02),
+        (6, 140, 0x02),
+        (10, 140, 0x02),
+        (15, 140, 0x02),
+        (24, 140, 0x02),
+        (31, 140, 0x02),
+        (41, 140, 0x02),
+        (56, 140, 0x03),
+    ],
+    // 152
+    [
+        (3, 141, 0x02),
+        (6, 141, 0x02),
+        (10, 141, 0x02),
+        (15, 141, 0x02),
+        (24, 141, 0x02),
+        (31, 141, 0x02),
+        (41, 141, 0x02),
+        (56, 141, 0x03),
+        (3, 143, 0x02),
+        (6, 143, 0x02),
+        (10, 143, 0x02),
+        (15, 143, 0x02),
+        (24, 143, 0x02),
+        (31, 143, 0x02),
+        (41, 143, 0x02),
+        (56, 143, 0x03),
+    ],
+    // 153
+    [
+        (157, 0, 0x00),
+        (158, 0, 0x00),
+        (160, 0, 0x00),
+        (161, 0, 0x00),
+        (164, 0, 0x00),
+        (165, 0, 0x00),
+        (167, 0, 0x00),
+        (168, 0, 0x00),
+        (172, 0, 0x00),
+        (173, 0, 0x00),
+        (175, 0, 0x00),
+        (177, 0, 0x00),
+        (182, 0, 0x00),
+        (185, 0, 0x00),
+        (191, 0, 0x00),
+        (207, 0, 0x00),
+    ],
+    // 154
+    [
+        (0, 147, 0x02),
+        (0, 149, 0x02),
+        (0, 150, 0x02),
+        (0, 151, 0x02),
+        (0, 152, 0x02),
+        (0, 155, 0x02),
+        (0, 157, 0x02),
+        (0, 158, 0x02),
+        (0, 165, 0x02),
+        (0, 166, 0x02),
+        (0, 168, 0x02),
+        (0, 174, 0x02),
+        (0, 175, 0x02),
+        (0, 180, 0x02),
+        (0, 182, 0x02),
+        (0, 183, 0x02),
+    ],
+    // 155
+    [
+        (1, 147, 0x02),
+        (22, 147, 0x03),
+        (1, 149, 0x02),
+        (22, 149, 0x03),
+        (1, 150, 0x02),
+        (22, 150, 0x03),
+        (1, 151, 0x02),
+        (22, 151, 0x03),
+        (1, 152, 0x02),
+        (22, 152, 0x03),
+        (1, 155, 0x02),
+        (22, 155, 0x03),
+        (1, 157, 0x02),
+        (22, 157, 0x03),
+        (1, 158, 0x02),
+        (22, 158, 0x03),
+    ],
+    // 156
+    [
+        (2, 147, 0x02),
+        (9, 147, 0x02),
+        (23, 147, 0x02),
+        (40, 147, 0x03),
+        (2, 149, 0x02),
+        (9, 149, 0x02),
+        (23, 149, 0x02),
+        (40, 149, 0x03),
+        (2, 150, 0x02),
+        (9, 150, 0x02),
+        (23, 150, 0x02),
+        (40, 150, 0x03),
+        (2, 151, 0x02),
+        (9, 151, 0x02),
+        (23, 151, 0x02),
+        (40, 151, 0x03),
+    ],
+    // 157
+    [
+        (3, 147, 0x02),
+        (6, 147, 0x02),
+        (10, 147, 0x02),
+        (15, 147, 0x02),
+        (24, 147, 0x02),
+        (31, 147, 0x02),
+        (41, 147, 0x02),
+        (56, 147, 0x03),
+        (3, 149, 0x02),
+        (6, 149, 0x02),
+        (10, 149, 0x02),
+        (15, 149, 0x02),
+        (24, 149, 0x02),
+        (31, 149, 0x02),
+        (41, 149, 0x02),
+        (56, 149, 0x03),
+    ],
+    // 158
+    [
+        (3, 150, 0x02),
+        (6, 150, 0x02),
+        (10, 150, 0x02),
+        (15, 150, 0x02),
+        (24, 150, 0x02),
+        (31, 150, 0x02),
+        (41, 150, 0x02),
+        (56, 150, 0x03),
+        (3, 151, 0x02),
+        (6, 151, 0x02),
+        (10, 151, 0x02),
+        (15, 151, 0x02),
+        (24, 151, 0x02),
+        (31, 151, 0x02),
+        (41, 151, 0x02),
+        (56, 151, 0x03),
+    ],
+    // 159
+    [
+        (2, 152, 0x02),
+        (9, 152, 0x02),
+        (23, 152, 0x02),
+        (40, 152, 0x03),
+        (2, 155, 0x02),
+        (9, 155, 0x02),
+        (23, 155, 0x02),
+        (40, 155, 0x03),
+        (2, 157, 0x02),
+        (9, 157, 0x02),
+        (23, 157, 0x02),
+        (40, 157, 0x03),
+        (2, 158, 0x02),
+        (9, 158, 0x02),
+        (23, 158, 0x02),
+        (40, 158, 0x03),
+    ],
+    // 160
+    [
+        (3, 152, 0x02),
+        (6, 152, 0x02),
+        (10, 152, 0x02),
+        (15, 152, 0x02),
+        (24, 152, 0x02),
+        (31, 152, 0x02),
+        (41, 152, 0x02),
+        (56, 152, 0x03),
+        (3, 155, 0x02),
+        (6, 155, 0x02),
+        (10, 155, 0x02),
+        (15, 155, 0x02),
+        (24, 155, 0x02),
+        (31, 155, 0x02),
+        (41, 155, 0x02),
+        (56, 155, 0x03),
+    ],
+    // 161
+    [
+        (3, 157, 0x02),
+        (6, 157, 0x02),
+        (10, 157, 0x02),
+        (15, 157, 0x02),
+        (24, 157, 0x02),
+        (31, 157, 0x02),
+        (41, 157, 0x02),
+        (56, 157, 0x03),
+        (3, 158, 0x02),
+        (6, 158, 0x02),
+        (10, 158, 0x02),
+        (15, 158, 0x02),
+        (24, 158, 0x02),
+        (31, 158, 0x02),
+        (41, 158, 0x02),
+        (56, 158, 0x03),
+    ],
+    // 162
+    [
+        (1, 165, 0x02),
+        (22, 165, 0x03),
+        (1, 166, 0x02),
+        (22, 166, 0x03),
+        (1, 168, 0x02),
+        (22, 168, 0x03),
+        (1, 174, 0x02),
+        (22, 174, 0x03),
+        (1, 175, 0x02),
+        (22, 175, 0x03),
+        (1, 180, 0x02),
+        (22, 180, 0x03),
+        (1, 182, 0x02),
+        (22, 182, 0x03),
+        (1, 183, 0x02),
+        (22, 183, 0x03),
+    ],
+    // 163
+    [
+        (2, 165, 0x02),
+        (9, 165, 0x02),
+        (23, 165, 0x02),
+        (40, 165, 0x03),
+        (2, 166, 0x02),
+        (9, 166, 0x02),
+        (23, 166, 0x02),
+        (40, 166, 0x03),
+        (2, 168, 0x02),
+        (9, 168, 0x02),
+        (23, 168, 0x02),
+        (40, 168, 0x03),
+        (2, 174, 0x02),
+        (9, 174, 0x02),
+        (23, 174, 0x02),
+        (40, 174, 0x03),
+    ],
+    // 164
+    [
+        (3, 165, 0x02),
+        (6, 165, 0x02),
+        (10, 165, 0x02),
+        (15, 165, 0x02),
+        (24, 165, 0x02),
+        (31, 165, 0x02),
+        (41, 165, 0x02),
+        (56, 165, 0x03),
+        (3, 166, 0x02),
+        (6, 166, 0x02),
+        (10, 166, 0x02),
+        (15, 166, 0x02),
+        (24, 166, 0x02),
+        (31, 166, 0x02),
+        (41, 166, 0x02),
+        (56, 166, 0x03),
+    ],
+    // 165
+    [
+        (3, 168, 0x02),
+        (6, 168, 0x02),
+        (10, 168, 0x02),
+        (15, 168, 0x02),
+        (24, 168, 0x02),
+        (31, 168, 0x02),
+        (41, 168, 0x02),
+        (56, 168, 0x03),
+        (3, 174, 0x02),
+        (6, 174, 0x02),
+        (10, 174, 0x02),
+        (15, 174, 0x02),
+        (24, 174, 0x02),
+        (31, 174, 0x02),
+        (41, 174, 0x02),
+        (56, 174, 0x03),
+    ],
+    // 166
+    [
+        (2, 175, 0x02),
+        (9, 175, 0x02),
+        (23, 175, 0x02),
+        (40, 175, 0x03),
+        (2, 180, 0x02),
+        (9, 180, 0x02),
+        (23, 180, 0x02),
+        (40, 180, 0x03),
+        (2, 182, 0x02),
+        (9, 182, 0x02),
+        (23, 182, 0x02),
+        (40, 182, 0x03),
+        (2, 183, 0x02),
+        (9, 183, 0x02),
+        (23, 183, 0x02),
+        (40, 183, 0x03),
+    ],
+    // 167
+    [
+        (3, 175, 0x02),
+        (6, 175, 0x02),
+        (10, 175, 0x02),
+        (15, 175, 0x02),
+        (24, 175, 0x02),
+        (31, 175, 0x02),
+        (41, 175, 0x02),
+        (56, 175, 0x03),
+        (3, 180, 0x02),
+        (6, 180, 0x02),
+        (10, 180, 0x02),
+        (15, 180, 0x02),
+        (24, 180, 0x02),
+        (31, 180, 0x02),
+        (41, 180, 0x02),
+        (56, 180, 0x03),
+    ],
+    // 168
+    [
+        (3, 182, 0x02),
+        (6, 182, 0x02),
+        (10, 182, 0x02),
+        (15, 182, 0x02),
+        (24, 182, 0x02),
+        (31, 182, 0x02),
+        (41, 182, 0x02),
+        (56, 182, 0x03),
+        (3, 183, 0x02),
+        (6, 183, 0x02),
+        (10, 183, 0x02),
+        (15, 183, 0x02),
+        (24, 183, 0x02),
+        (31, 183, 0x02),
+        (41, 183, 0x02),
+        (56, 183, 0x03),
+    ],
+    // 169
+    [
+        (0, 188, 0x02),
+        (0, 191, 0x02),
+        (0, 197, 0x02),
+        (0, 231, 0x02),
+        (0, 239, 0x02),
+        (176, 0, 0x00),
+        (178, 0, 0x00),
+        (179, 0, 0x00),
+        (183, 0, 0x00),
+        (184, 0, 0x00),
+        (186, 0, 0x00),
+        (187, 0, 0x00),
+        (192, 0, 0x00),
+        (199, 0, 0x00),
+        (208, 0, 0x00),
+        (223, 0, 0x00),
+    ],
+    // 170
+    [
+        (1, 188, 0x02),
+        (22, 188, 0x03),
+        (1, 191, 0x02),
+        (22, 191, 0x03),
+        (1, 197, 0x02),
+        (22, 197, 0x03),
+        (1, 231, 0x02),
+        (22, 231, 0x03),
+        (1, 239, 0x02),
+        (22, 239, 0x03),
+        (0, 9, 0x02),
+        (0, 142, 0x02),
+        (0, 144, 0x02),
+        (0, 145, 0x02),
+        (0, 148, 0x02),
+        (0, 159, 0x02),
+    ],
+    // 171
+    [
+        (2, 188, 0x02),
+        (9, 188, 0x02),
+        (23, 188, 0x02),
+        (40, 188, 0x03),
+        (2, 191, 0x02),
+        (9, 191, 0x02),
+        (23, 191, 0x02),
+        (40, 191, 0x03),
+        (2, 197, 0x02),
+        (9, 197, 0x02),
+        (23, 197, 0x02),
+        (40, 197, 0x03),
+        (2, 231, 0x02),
+        (9, 231, 0x02),
+        (23, 231, 0x02),
+        (40, 231, 0x03),
+    ],
+    // 172
+    [
+        (3, 188, 0x02),
+        (6, 188, 0x02),
+        (10, 188, 0x02),
+        (15, 188, 0x02),
+        (24, 188, 0x02),
+        (31, 188, 0x02),
+        (41, 188, 0x02),
+        (56, 188, 0x03),
+        (3, 191, 0x02),
+        (6, 191, 0x02),
+        (10, 191, 0x02),
+        (15, 191, 0x02),
+        (24, 191, 0x02),
+        (31, 191, 0x02),
+        (41, 191, 0x02),
+        (56, 191, 0x03),
+    ],
+    // 173
+    [
+        (3, 197, 0x02),
+        (6, 197, 0x02),
+        (10, 197, 0x02),
+        (15, 197, 0x02),
+        (24, 197, 0x02),
+        (31, 197, 0x02),
+        (41, 197, 0x02),
+        (56, 197, 0x03),
+        (3, 231, 0x02),
+        (6, 231, 0x02),
+        (10, 231, 0x02),
+        (15, 231, 0x02),
+        (24, 231, 0x02),
+        (31, 231, 0x02),
+        (41, 231, 0x02),
+        (56, 231, 0x03),
+    ],
+    // 174
+    [
+        (2, 239, 0x02),
+        (9, 239, 0x02),
+        (23, 239, 0x02),
+        (40, 239, 0x03),
+        (1, 9, 0x02),
+        (22, 9, 0x03),
+        (1, 142, 0x02),
+        (22, 142, 0x03),
+        (1, 144, 0x02),
+        (22, 144, 0x03),
+        (1, 145, 0x02),
+        (22, 145, 0x03),
+        (1, 148, 0x02),
+        (22, 148, 0x03),
+        (1, 159, 0x02),
+        (22, 159, 0x03),
+    ],
+    // 175
+    [
+        (3, 239, 0x02),
+        (6, 239, 0x02),
+        (10, 239, 0x02),
+        (15, 239, 0x02),
+        (24, 239, 0x02),
+        (31, 239, 0x02),
+        (41, 239, 0x02),
+        (56, 239, 0x03),
+        (2, 9, 0x02),
+        (9, 9, 0x02),
+        (23, 9, 0x02),
+        (40, 9, 0x03),
+        (2, 142, 0x02),
+        (9, 142, 0x02),
+        (23, 142, 0x02),
+        (40, 142, 0x03),
+    ],
+    // 176
+    [
+        (3, 9, 0x02),
+        (6, 9, 0x02),
+        (10, 9, 0x02),
+        (15, 9, 0x02),
+        (24, 9, 0x02),
+        (31, 9, 0x02),
+        (41, 9, 0x02),
+        (56, 9, 0x03),
+        (3, 142, 0x02),
+        (6, 142, 0x02),
+        (10, 142, 0x02),
+        (15, 142, 0x02),
+        (24, 142, 0x02),
+        (31, 142, 0x02),
+        (41, 142, 0x02),
+        (56, 142, 0x03),
+    ],
+    // 177
+    [
+        (2, 144, 0x02),
+        (9, 144, 0x02),
+        (23, 144, 0x02),
+        (40, 144, 0x03),
+        (2, 145, 0x02),
+        (9, 145, 0x02),
+        (23, 145, 0x02),
+        (40, 145, 0x03),
+        (2, 148, 0x02),
+        (9, 148, 0x02),
+        (23, 148, 0x02),
+        (40, 148, 0x03),
+        (2, 159, 0x02),
+        (9, 159, 0x02),
+        (23, 159, 0x02),
+        (40, 159, 0x03),
+    ],
+    // 178
+    [
+        (3, 144, 0x02),
+        (6, 144, 0x02),
+        (10, 144, 0x02),
+        (15, 144, 0x02),
+        (24, 144, 0x02),
+        (31, 144, 0x02),
+        (41, 144, 0x02),
+        (56, 144, 0x03),
+        (3, 145, 0x02),
+        (6, 145, 0x02),
+        (10, 145, 0x02),
+        (15, 145, 0x02),
+        (24, 145, 0x02),
+        (31, 145, 0x02),
+        (41, 145, 0x02),
+        (56, 145, 0x03),
+    ],
+    // 179
+    [
+        (3, 148, 0x02),
+        (6, 148, 0x02),
+        (10, 148, 0x02),
+        (15, 148, 0x02),
+        (24, 148, 0x02),
+        (31, 148, 0x02),
+        (41, 148, 0x02),
+        (56, 148, 0x03),
+        (3, 159, 0x02),
+        (6, 159, 0x02),
+        (10, 159, 0x02),
+        (15, 159, 0x02),
+        (24, 159, 0x02),
+        (31, 159, 0x02),
+        (41, 159, 0x02),
+        (56, 159, 0x03),
+    ],
+    // 180
+    [
+        (0, 171, 0x02),
+        (0, 206, 0x02),
+        (0, 215, 0x02),
+        (0, 225, 0x02),
+        (0, 236, 0x02),
+        (0, 237, 0x02),
+        (188, 0, 0x00),
+        (189, 0, 0x00),
+        (193, 0, 0x00),
+        (196, 0, 0x00),
+        (200, 0, 0x00),
+        (203, 0, 0x00),
+        (209, 0, 0x00),
+        (216, 0, 0x00),
+        (224, 0, 0x00),
+        (238, 0, 0x00),
+    ],
+    // 181
+    [
+        (1, 171, 0x02),
+        (22, 171, 0x03),
+        (1, 206, 0x02),
+        (22, 206, 0x03),
+        (1, 215, 0x02),
+        (22, 215, 0x03),
+        (1, 225, 0x02),
+        (22, 225, 0x03),
+        (1, 236, 0x02),
+        (22, 236, 0x03),
+        (1, 237, 0x02),
+        (22, 237, 0x03),
+        (0, 199, 0x02),
+        (0, 207, 0x02),
+        (0, 234, 0x02),
+        (0, 235, 0x02),
+    ],
+    // 182
+    [
+        (2, 171, 0x02),
+        (9, 171, 0x02),
+        (23, 171, 0x02),
+        (40, 171, 0x03),
+        (2, 206, 0x02),
+        (9, 206, 0x02),
+        (23, 206, 0x02),
+        (40, 206, 0x03),
+        (2, 215, 0x02),
+        (9, 215, 0x02),
+        (23, 215, 0x02),
+        (40, 215, 0x03),
+        (2, 225, 0x02),
+        (9, 225, 0x02),
+        (23, 225, 0x02),
+        (40, 225, 0x03),
+    ],
+    // 183
+    [
+        (3, 171, 0x02),
+        (6, 171, 0x02),
+        (10, 171, 0x02),
+        (15, 171, 0x02),
+        (24, 171, 0x02),
+        (31, 171, 0x02),
+        (41, 171, 0x02),
+        (56, 171, 0x03),
+        (3, 206, 0x02),
+        (6, 206, 0x02),
+        (10, 206, 0x02),
+        (15, 206, 0x02),
+        (24, 206, 0x02),
+        (31, 206, 0x02),
+        (41, 206, 0x02),
+        (56, 206, 0x03),
+    ],
+    // 184
+    [
+        (3, 215, 0x02),
+        (6, 215, 0x02),
+        (10, 215, 0x02),
+        (15, 215, 0x02),
+        (24, 215, 0x02),
+        (31, 215, 0x02),
+        (41, 215, 0x02),
+        (56, 215, 0x03),
+        (3, 225, 0x02),
+        (6, 225, 0x02),
+        (10, 225, 0x02),
+        (15, 225, 0x02),
+        (24, 225, 0x02),
+        (31, 225, 0x02),
+        (41, 225, 0x02),
+        (56, 225, 0x03),
+    ],
+    // 185
+    [
+        (2, 236, 0x02),
+        (9, 236, 0x02),
+        (23, 236, 0x02),
+        (40, 236, 0x03),
+        (2, 237, 0x02),
+        (9, 237, 0x02),
+        (23, 237, 0x02),
+        (40, 237, 0x03),
+        (1, 199, 0x02),
+        (22, 199, 0x03),
+        (1, 207, 0x02),
+        (22, 207, 0x03),
+        (1, 234, 0x02),
+        (22, 234, 0x03),
+        (1, 235, 0x02),
+        (22, 235, 0x03),
+    ],
+    // 186
+    [
+        (3, 236, 0x02),
+        (6, 236, 0x02),
+        (10, 236, 0x02),
+        (15, 236, 0x02),
+        (24, 236, 0x02),
+        (31, 236, 0x02),
+        (41, 236, 0x02),
+        (56, 236, 0x03),
+        (3, 237, 0x02),
+        (6, 237, 0x02),
+        (10, 237, 0x02),
+        (15, 237, 0x02),
+        (24, 237, 0x02),
+        (31, 237, 0x02),
+        (41, 237, 0x02),
+        (56, 237, 0x03),
+    ],
+    // 187
+    [
+        (2, 199, 0x02),
+        (9, 199, 0x02),
+        (23, 199, 0x02),
+        (40, 199, 0x03),
+        (2, 207, 0x02),
+        (9, 207, 0x02),
+        (23, 207, 0x02),
+        (40, 207, 0x03),
+        (2, 234, 0x02),
+        (9, 234, 0x02),
+        (23, 234, 0x02),
+        (40, 234, 0x03),
+        (2, 235, 0x02),
+        (9, 235, 0x02),
+        (23, 235, 0x02),
+        (40, 235, 0x03),
+    ],
+    // 188
+    [
+        (3, 199, 0x02),
+        (6, 199, 0x02),
+        (10, 199, 0x02),
+        (15, 199, 0x02),
+        (24, 199, 0x02),
+        (31, 199, 0x02),
+        (41, 199, 0x02),
+        (56, 199, 0x03),
+        (3, 207, 0x02),
+        (6, 207, 0x02),
+        (10, 207, 0x02),
+        (15, 207, 0x02),
+        (24, 207, 0x02),
+        (31, 207, 0x02),
+        (41, 207, 0x02),
+        (56, 207, 0x03),
+    ],
+    // 189
+    [
+        (3, 234, 0x02),
+        (6, 234, 0x02),
+        (10, 234, 0x02),
+        (15, 234, 0x02),
+        (24, 234, 0x02),
+        (31, 234, 0x02),
+        (41, 234, 0x02),
+        (56, 234, 0x03),
+        (3, 235, 0x02),
+        (6, 235, 0x02),
+        (10, 235, 0x02),
+        (15, 235, 0x02),
+        (24, 235, 0x02),
+        (31, 235, 0x02),
+        (41, 235, 0x02),
+        (56, 235, 0x03),
+    ],
+    // 190
+    [
+        (194, 0, 0x00),
+        (195, 0, 0x00),
+        (197, 0, 0x00),
+        (198, 0, 0x00),
+        (201, 0, 0x00),
+        (202, 0, 0x00),
+        (204, 0, 0x00),
+        (205, 0, 0x00),
+        (210, 0, 0x00),
+        (213, 0, 0x00),
+        (217, 0, 0x00),
+        (220, 0, 0x00),
+        (225, 0, 0x00),
+        (231, 0, 0x00),
+        (239, 0, 0x00),
+        (246, 0, 0x00),
+    ],
+    // 191
+    [
+        (0, 192, 0x02),
+        (0, 193, 0x02),
+        (0, 200, 0x02),
+        (0, 201, 0x02),
+        (0, 202, 0x02),
+        (0, 205, 0x02),
+        (0, 210, 0x02),
+        (0, 213, 0x02),
+        (0, 218, 0x02),
+        (0, 219, 0x02),
+        (0, 238, 0x02),
+        (0, 240, 0x02),
+        (0, 242, 0x02),
+        (0, 243, 0x02),
+        (0, 255, 0x02),
+        (206, 0, 0x00),
+    ],
+    // 192
+    [
+        (1, 192, 0x02),
+        (22, 192, 0x03),
+        (1, 193, 0x02),
+        (22, 193, 0x03),
+        (1, 200, 0x02),
+        (22, 200, 0x03),
+        (1, 201, 0x02),
+        (22, 201, 0x03),
+        (1, 202, 0x02),
+        (22, 202, 0x03),
+        (1, 205, 0x02),
+        (22, 205, 0x03),
+        (1, 210, 0x02),
+        (22, 210, 0x03),
+        (1, 213, 0x02),
+        (22, 213, 0x03),
+    ],
+    // 193
+    [
+        (2, 192, 0x02),
+        (9, 192, 0x02),
+        (23, 192, 0x02),
+        (40, 192, 0x03),
+        (2, 193, 0x02),
+        (9, 193, 0x02),
+        (23, 193, 0x02),
+        (40, 193, 0x03),
+        (2, 200, 0x02),
+        (9, 200, 0x02),
+        (23, 200, 0x02),
+        (40, 200, 0x03),
+        (2, 201, 0x02),
+        (9, 201, 0x02),
+        (23, 201, 0x02),
+        (40, 201, 0x03),
+    ],
+    // 194
+    [
+        (3, 192, 0x02),
+        (6, 192, 0x02),
+        (10, 192, 0x02),
+        (15, 192, 0x02),
+        (24, 192, 0x02),
+        (31, 192, 0x02),
+        (41, 192, 0x02),
+        (56, 192, 0x03),
+        (3, 193, 0x02),
+        (6, 193, 0x02),
+        (10, 193, 0x02),
+        (15, 193, 0x02),
+        (24, 193, 0x02),
+        (31, 193, 0x02),
+        (41, 193, 0x02),
+        (56, 193, 0x03),
+    ],
+    // 195
+    [
+        (3, 200, 0x02),
+        (6, 200, 0x02),
+        (10, 200, 0x02),
+        (15, 200, 0x02),
+        (24, 200, 0x02),
+        (31, 200, 0x02),
+        (41, 200, 0x02),
+        (56, 200, 0x03),
+        (3, 201, 0x02),
+        (6, 201, 0x02),
+        (10, 201, 0x02),
+        (15, 201, 0x02),
+        (24, 201, 0x02),
+        (31, 201, 0x02),
+        (41, 201, 0x02),
+        (56, 201, 0x03),
+    ],
+    // 196
+    [
+        (2, 202, 0x02),
+        (9, 202, 0x02),
+        (23, 202, 0x02),
+        (40, 202, 0x03),
+        (2, 205, 0x02),
+        (9, 205, 0x02),
+        (23, 205, 0x02),
+        (40, 205, 0x03),
+        (2, 210, 0x02),
+        (9, 210, 0x02),
+        (23, 210, 0x02),
+        (40, 210, 0x03),
+        (2, 213, 0x02),
+        (9, 213, 0x02),
+        (23, 213, 0x02),
+        (40, 213, 0x03),
+    ],
+    // 197
+    [
+        (3, 202, 0x02),
+        (6, 202, 0x02),
+        (10, 202, 0x02),
+        (15, 202, 0x02),
+        (24, 202, 0x02),
+        (31, 202, 0x02),
+        (41, 202, 0x02),
+        (56, 202, 0x03),
+        (3, 205, 0x02),
+        (6, 205, 0x02),
+        (10, 205, 0x02),
+        (15, 205, 0x02),
+        (24, 205, 0x02),
+        (31, 205, 0x02),
+        (41, 205, 0x02),
+        (56, 205, 0x03),
+    ],
+    // 198
+    [
+        (3, 210, 0x02),
+        (6, 210, 0x02),
+        (10, 210, 0x02),
+        (15, 210, 0x02),
+        (24, 210, 0x02),
+        (31, 210, 0x02),
+        (41, 210, 0x02),
+        (56, 210, 0x03),
+        (3, 213, 0x02),
+        (6, 213, 0x02),
+        (10, 213, 0x02),
+        (15, 213, 0x02),
+        (24, 213, 0x02),
+        (31, 213, 0x02),
+        (41, 213, 0x02),
+        (56, 213, 0x03),
+    ],
+    // 199
+    [
+        (1, 218, 0x02),
+        (22, 218, 0x03),
+        (1, 219, 0x02),
+        (22, 219, 0x03),
+        (1, 238, 0x02),
+        (22, 238, 0x03),
+        (1, 240, 0x02),
+        (22, 240, 0x03),
+        (1, 242, 0x02),
+        (22, 242, 0x03),
+        (1, 243, 0x02),
+        (22, 243, 0x03),
+        (1, 255, 0x02),
+        (22, 255, 0x03),
+        (0, 203, 0x02),
+        (0, 204, 0x02),
+    ],
+    // 200
+    [
+        (2, 218, 0x02),
+        (9, 218, 0x02),
+        (23, 218, 0x02),
+        (40, 218, 0x03),
+        (2, 219, 0x02),
+        (9, 219, 0x02),
+        (23, 219, 0x02),
+        (40, 219, 0x03),
+        (2, 238, 0x02),
+        (9, 238, 0x02),
+        (23, 238, 0x02),
+        (40, 238, 0x03),
+        (2, 240, 0x02),
+        (9, 240, 0x02),
+        (23, 240, 0x02),
+        (40, 240, 0x03),
+    ],
+    // 201
+    [
+        (3, 218, 0x02),
+        (6, 218, 0x02),
+        (10, 218, 0x02),
+        (15, 218, 0x02),
+        (24, 218, 0x02),
+        (31, 218, 0x02),
+        (41, 218, 0x02),
+        (56, 218, 0x03),
+        (3, 219, 0x02),
+        (6, 219, 0x02),
+        (10, 219, 0x02),
+        (15, 219, 0x02),
+        (24, 219, 0x02),
+        (31, 219, 0x02),
+        (41, 219, 0x02),
+        (56, 219, 0x03),
+    ],
+    // 202
+    [
+        (3, 238, 0x02),
+        (6, 238, 0x02),
+        (10, 238, 0x02),
+        (15, 238, 0x02),
+        (24, 238, 0x02),
+        (31, 238, 0x02),
+        (41, 238, 0x02),
+        (56, 238, 0x03),
+        (3, 240, 0x02),
+        (6, 240, 0x02),
+        (10, 240, 0x02),
+        (15, 240, 0x02),
+        (24, 240, 0x02),
+        (31, 240, 0x02),
+        (41, 240, 0x02),
+        (56, 240, 0x03),
+    ],
+    // 203
+    [
+        (2, 242, 0x02),
+        (9, 242, 0x02),
+        (23, 242, 0x02),
+        (40, 242, 0x03),
+        (2, 243, 0x02),
+        (9, 243, 0x02),
+        (23, 243, 0x02),
+        (40, 243, 0x03),
+        (2, 255, 0x02),
+        (9, 255, 0x02),
+        (23, 255, 0x02),
+        (40, 255, 0x03),
+        (1, 203, 0x02),
+        (22, 203, 0x03),
+        (1, 204, 0x02),
+        (22, 204, 0x03),
+    ],
+    // 204
+    [
+        (3, 242, 0x02),
+        (6, 242, 0x02),
+        (10, 242, 0x02),
+        (15, 242, 0x02),
+        (24, 242, 0x02),
+        (31, 242, 0x02),
+        (41, 242, 0x02),
+        (56, 242, 0x03),
+        (3, 243, 0x02),
+        (6, 243, 0x02),
+        (10, 243, 0x02),
+        (15, 243, 0x02),
+        (24, 243, 0x02),
+        (31, 243, 0x02),
+        (41, 243, 0x02),
+        (56, 243, 0x03),
+    ],
+    // 205
+    [
+        (3, 255, 0x02),
+        (6, 255, 0x02),
+        (10, 255, 0x02),
+        (15, 255, 0x02),
+        (24, 255, 0x02),
+        (31, 255, 0x02),
+        (41, 255, 0x02),
+        (56, 255, 0x03),
+        (2, 203, 0x02),
+        (9, 203, 0x02),
+        (23, 203, 0x02),
+        (40, 203, 0x03),
+        (2, 204, 0x02),
+        (9, 204, 0x02),
+        (23, 204, 0x02),
+        (40, 204, 0x03),
+    ],
+    // 206
+    [
+        (3, 203, 0x02),
+        (6, 203, 0x02),
+        (10, 203, 0x02),
+        (15, 203, 0x02),
+        (24, 203, 0x02),
+        (31, 203, 0x02),
+        (41, 203, 0x02),
+        (56, 203, 0x03),
+        (3, 204, 0x02),
+        (6, 204, 0x02),
+        (10, 204, 0x02),
+        (15, 204, 0x02),
+        (24, 204, 0x02),
+        (31, 204, 0x02),
+        (41, 204, 0x02),
+        (56, 204, 0x03),
+    ],
+    // 207
+    [
+        (211, 0, 0x00),
+        (212, 0, 0x00),
+        (214, 0, 0x00),
+        (215, 0, 0x00),
+        (218, 0, 0x00),
+        (219, 0, 0x00),
+        (221, 0, 0x00),
+        (222, 0, 0x00),
+        (226, 0, 0x00),
+        (228, 0, 0x00),
+        (232, 0, 0x00),
+        (235, 0, 0x00),
+        (240, 0, 0x00),
+        (243, 0, 0x00),
+        (247, 0, 0x00),
+        (250, 0, 0x00),
+    ],
+    // 208
+    [
+        (0, 211, 0x02),
+        (0, 212, 0x02),
+        (0, 214, 0x02),
+        (0, 221, 0x02),
+        (0, 222, 0x02),
+        (0, 223, 0x02),
+        (0, 241, 0x02),
+        (0, 244, 0x02),
+        (0, 245, 0x02),
+        (0, 246, 0x02),
+        (0, 247, 0x02),
+        (0, 248, 0x02),
+        (0, 250, 0x02),
+        (0, 251, 0x02),
+        (0, 252, 0x02),
+        (0, 253, 0x02),
+    ],
+    // 209
+    [
+        (1, 211, 0x02),
+        (22, 211, 0x03),
+        (1, 212, 0x02),
+        (22, 212, 0x03),
+        (1, 214, 0x02),
+        (22, 214, 0x03),
+        (1, 221, 0x02),
+        (22, 221, 0x03),
+        (1, 222, 0x02),
+        (22, 222, 0x03),
+        (1, 223, 0x02),
+        (22, 223, 0x03),
+        (1, 241, 0x02),
+        (22, 241, 0x03),
+        (1, 244, 0x02),
+        (22, 244, 0x03),
+    ],
+    // 210
+    [
+        (2, 211, 0x02),
+        (9, 211, 0x02),
+        (23, 211, 0x02),
+        (40, 211, 0x03),
+        (2, 212, 0x02),
+        (9, 212, 0x02),
+        (23, 212, 0x02),
+        (40, 212, 0x03),
+        (2, 214, 0x02),
+        (9, 214, 0x02),
+        (23, 214, 0x02),
+        (40, 214, 0x03),
+        (2, 221, 0x02),
+        (9, 221, 0x02),
+        (23, 221, 0x02),
+        (40, 221, 0x03),
+    ],
+    // 211
+    [
+        (3, 211, 0x02),
+        (6, 211, 0x02),
+        (10, 211, 0x02),
+        (15, 211, 0x02),
+        (24, 211, 0x02),
+        (31, 211, 0x02),
+        (41, 211, 0x02),
+        (56, 211, 0x03),
+        (3, 212, 0x02),
+        (6, 212, 0x02),
+        (10, 212, 0x02),
+        (15, 212, 0x02),
+        (24, 212, 0x02),
+        (31, 212, 0x02),
+        (41, 212, 0x02),
+        (56, 212, 0x03),
+    ],
+    // 212
+    [
+        (3, 214, 0x02),
+        (6, 214, 0x02),
+        (10, 214, 0x02),
+        (15, 214, 0x02),
+        (24, 214, 0x02),
+        (31, 214, 0x02),
+        (41, 214, 0x02),
+        (56, 214, 0x03),
+        (3, 221, 0x02),
+        (6, 221, 0x02),
+        (10, 221, 0x02),
+        (15, 221, 0x02),
+        (24, 221, 0x02),
+        (31, 221, 0x02),
+        (41, 221, 0x02),
+        (56, 221, 0x03),
+    ],
+    // 213
+    [
+        (2, 222, 0x02),
+        (9, 222, 0x02),
+        (23, 222, 0x02),
+        (40, 222, 0x03),
+        (2, 223, 0x02),
+        (9, 223, 0x02),
+        (23, 223, 0x02),
+        (40, 223, 0x03),
+        (2, 241, 0x02),
+        (9, 241, 0x02),
+        (23, 241, 0x02),
+        (40, 241, 0x03),
+        (2, 244, 0x02),
+        (9, 244, 0x02),
+        (23, 244, 0x02),
+        (40, 244, 0x03),
+    ],
+    // 214
+    [
+        (3, 222, 0x02),
+        (6, 222, 0x02),
+        (10, 222, 0x02),
+        (15, 222, 0x02),
+        (24, 222, 0x02),
+        (31, 222, 0x02),
+        (41, 222, 0x02),
+        (56, 222, 0x03),
+        (3, 223, 0x02),
+        (6, 223, 0x02),
+        (10, 223, 0x02),
+        (15, 223, 0x02),
+        (24, 223, 0x02),
+        (31, 223, 0x02),
+        (41, 223, 0x02),
+        (56, 223, 0x03),
+    ],
+    // 215
+    [
+        (3, 241, 0x02),
+        (6, 241, 0x02),
+        (10, 241, 0x02),
+        (15, 241, 0x02),
+        (24, 241, 0x02),
+        (31, 241, 0x02),
+        (41, 241, 0x02),
+        (56, 241, 0x03),
+        (3, 244, 0x02),
+        (6, 244, 0x02),
+        (10, 244, 0x02),
+        (15, 244, 0x02),
+        (24, 244, 0x02),
+        (31, 244, 0x02),
+        (41, 244, 0x02),
+        (56, 244, 0x03),
+    ],
+    // 216
+    [
+        (1, 245, 0x02),
+        (22, 245, 0x03),
+        (1, 246, 0x02),
+        (22, 246, 0x03),
+        (1, 247, 0x02),
+        (22, 247, 0x03),
+        (1, 248, 0x02),
+        (22, 248, 0x03),
+        (1, 250, 0x02),
+        (22, 250, 0x03),
+        (1, 251, 0x02),
+        (22, 251, 0x03),
+        (1, 252, 0x02),
+        (22, 252, 0x03),
+        (1, 253, 0x02),
+        (22, 253, 0x03),
+    ],
+    // 217
+    [
+        (2, 245, 0x02),
+        (9, 245, 0x02),
+        (23, 245, 0x02),
+        (40, 245, 0x03),
+        (2, 246, 0x02),
+        (9, 246, 0x02),
+        (23, 246, 0x02),
+        (40, 246, 0x03),
+        (2, 247, 0x02),
+        (9, 247, 0x02),
+        (23, 247, 0x02),
+        (40, 247, 0x03),
+        (2, 248, 0x02),
+        (9, 248, 0x02),
+        (23, 248, 0x02),
+        (40, 248, 0x03),
+    ],
+    // 218
+    [
+        (3, 245, 0x02),
+        (6, 245, 0x02),
+        (10, 245, 0x02),
+        (15, 245, 0x02),
+        (24, 245, 0x02),
+        (31, 245, 0x02),
+        (41, 245, 0x02),
+        (56, 245, 0x03),
+        (3, 246, 0x02),
+        (6, 246, 0x02),
+        (10, 246, 0x02),
+        (15, 246, 0x02),
+        (24, 246, 0x02),
+        (31, 246, 0x02),
+        (41, 246, 0x02),
+        (56, 246, 0x03),
+    ],
+    // 219
+    [
+        (3, 247, 0x02),
+        (6, 247, 0x02),
+        (10, 247, 0x02),
+        (15, 247, 0x02),
+        (24, 247, 0x02),
+        (31, 247, 0x02),
+        (41, 247, 0x02),
+        (56, 247, 0x03),
+        (3, 248, 0x02),
+        (6, 248, 0x02),
+        (10, 248, 0x02),
+        (15, 248, 0x02),
+        (24, 248, 0x02),
+        (31, 248, 0x02),
+        (41, 248, 0x02),
+        (56, 248, 0x03),
+    ],
+    // 220
+    [
+        (2, 250, 0x02),
+        (9, 250, 0x02),
+        (23, 250, 0x02),
+        (40, 250, 0x03),
+        (2, 251, 0x02),
+        (9, 251, 0x02),
+        (23, 251, 0x02),
+        (40, 251, 0x03),
+        (2, 252, 0x02),
+        (9, 252, 0x02),
+        (23, 252, 0x02),
+        (40, 252, 0x03),
+        (2, 253, 0x02),
+        (9, 253, 0x02),
+        (23, 253, 0x02),
+        (40, 253, 0x03),
+    ],
+    // 221
+    [
+        (3, 250, 0x02),
+        (6, 250, 0x02),
+        (10, 250, 0x02),
+        (15, 250, 0x02),
+        (24, 250, 0x02),
+        (31, 250, 0x02),
+        (41, 250, 0x02),
+        (56, 250, 0x03),
+        (3, 251, 0x02),
+        (6, 251, 0x02),
+        (10, 251, 0x02),
+        (15, 251, 0x02),
+        (24, 251, 0x02),
+        (31, 251, 0x02),
+        (41, 251, 0x02),
+        (56, 251, 0x03),
+    ],
+    // 222
+    [
+        (3, 252, 0x02),
+        (6, 252, 0x02),
+        (10, 252, 0x02),
+        (15, 252, 0x02),
+        (24, 252, 0x02),
+        (31, 252, 0x02),
+        (41, 252, 0x02),
+        (56, 252, 0x03),
+        (3, 253, 0x02),
+        (6, 253, 0x02),
+        (10, 253, 0x02),
+        (15, 253, 0x02),
+        (24, 253, 0x02),
+        (31, 253, 0x02),
+        (41, 253, 0x02),
+        (56, 253, 0x03),
+    ],
+    // 223
+    [
+        (0, 254, 0x02),
+        (227, 0, 0x00),
+        (229, 0, 0x00),
+        (230, 0, 0x00),
+        (233, 0, 0x00),
+        (234, 0, 0x00),
+        (236, 0, 0x00),
+        (237, 0, 0x00),
+        (241, 0, 0x00),
+        (242, 0, 0x00),
+        (244, 0, 0x00),
+        (245, 0, 0x00),
+        (248, 0, 0x00),
+        (249, 0, 0x00),
+        (251, 0, 0x00),
+        (252, 0, 0x00),
+    ],
+    // 224
+    [
+        (1, 254, 0x02),
+        (22, 254, 0x03),
+        (0, 2, 0x02),
+        (0, 3, 0x02),
+        (0, 4, 0x02),
+        (0, 5, 0x02),
+        (0, 6, 0x02),
+        (0, 7, 0x02),
+        (0, 8, 0x02),
+        (0, 11, 0x02),
+        (0, 12, 0x02),
+        (0, 14, 0x02),
+        (0, 15, 0x02),
+        (0, 16, 0x02),
+        (0, 17, 0x02),
+        (0, 18, 0x02),
+    ],
+    // 225
+    [
+        (2, 254, 0x02),
+        (9, 254, 0x02),
+        (23, 254, 0x02),
+        (40, 254, 0x03),
+        (1, 2, 0x02),
+        (22, 2, 0x03),
+        (1, 3, 0x02),
+        (22, 3, 0x03),
+        (1, 4, 0x02),
+        (22, 4, 0x03),
+        (1, 5, 0x02),
+        (22, 5, 0x03),
+        (1, 6, 0x02),
+        (22, 6, 0x03),
+        (1, 7, 0x02),
+        (22, 7, 0x03),
+    ],
+    // 226
+    [
+        (3, 254, 0x02),
+        (6, 254, 0x02),
+        (10, 254, 0x02),
+        (15, 254, 0x02),
+        (24, 254, 0x02),
+        (31, 254, 0x02),
+        (41, 254, 0x02),
+        (56, 254, 0x03),
+        (2, 2, 0x02),
+        (9, 2, 0x02),
+        (23, 2, 0x02),
+        (40, 2, 0x03),
+        (2, 3, 0x02),
+        (9, 3, 0x02),
+        (23, 3, 0x02),
+        (40, 3, 0x03),
+    ],
+    // 227
+    [
+        (3, 2, 0x02),
+        (6, 2, 0x02),
+        (10, 2, 0x02),
+        (15, 2, 0x02),
+        (24, 2, 0x02),
+        (31, 2, 0x02),
+        (41, 2, 0x02),
+        (56, 2, 0x03),
+        (3, 3, 0x02),
+        (6, 3, 0x02),
+        (10, 3, 0x02),
+        (15, 3, 0x02),
+        (24, 3, 0x02),
+        (31, 3, 0x02),
+        (41, 3, 0x02),
+        (56, 3, 0x03),
+    ],
+    // 228
+    [
+        (2, 4, 0x02),
+        (9, 4, 0x02),
+        (23, 4, 0x02),
+        (40, 4, 0x03),
+        (2, 5, 0x02),
+        (9, 5, 0x02),
+        (23, 5, 0x02),
+        (40, 5, 0x03),
+        (2, 6, 0x02),
+        (9, 6, 0x02),
+        (23, 6, 0x02),
+        (40, 6, 0x03),
+        (2, 7, 0x02),
+        (9, 7, 0x02),
+        (23, 7, 0x02),
+        (40, 7, 0x03),
+    ],
+    // 229
+    [
+        (3, 4, 0x02),
+        (6, 4, 0x02),
+        (10, 4, 0x02),
+        (15, 4, 0x02),
+        (24, 4, 0x02),
+        (31, 4, 0x02),
+        (41, 4, 0x02),
+        (56, 4, 0x03),
+        (3, 5, 0x02),
+        (6, 5, 0x02),
+        (10, 5, 0x02),
+        (15, 5, 0x02),
+        (24, 5, 0x02),
+        (31, 5, 0x02),
+        (41, 5, 0x02),
+        (56, 5, 0x03),
+    ],
+    // 230
+    [
+        (3, 6, 0x02),
+        (6, 6, 0x02),
+        (10, 6, 0x02),
+        (15, 6, 0x02),
+        (24, 6, 0x02),
+        (31, 6, 0x02),
+        (41, 6, 0x02),
+        (56, 6, 0x03),
+        (3, 7, 0x02),
+        (6, 7, 0x02),
+        (10, 7, 0x02),
+        (15, 7, 0x02),
+        (24, 7, 0x02),
+        (31, 7, 0x02),
+        (41, 7, 0x02),
+        (56, 7, 0x03),
+    ],
+    // 231
+    [
+        (1, 8, 0x02),
+        (22, 8, 0x03),
+        (1, 11, 0x02),
+        (22, 11, 0x03),
+        (1, 12, 0x02),
+        (22, 12, 0x03),
+        (1, 14, 0x02),
+        (22, 14, 0x03),
+        (1, 15, 0x02),
+        (22, 15, 0x03),
+        (1, 16, 0x02),
+        (22, 16, 0x03),
+        (1, 17, 0x02),
+        (22, 17, 0x03),
+        (1, 18, 0x02),
+        (22, 18, 0x03),
+    ],
+    // 232
+    [
+        (2, 8, 0x02),
+        (9, 8, 0x02),
+        (23, 8, 0x02),
+        (40, 8, 0x03),
+        (2, 11, 0x02),
+        (9, 11, 0x02),
+        (23, 11, 0x02),
+        (40, 11, 0x03),
+        (2, 12, 0x02),
+        (9, 12, 0x02),
+        (23, 12, 0x02),
+        (40, 12, 0x03),
+        (2, 14, 0x02),
+        (9, 14, 0x02),
+        (23, 14, 0x02),
+        (40, 14, 0x03),
+    ],
+    // 233
+    [
+        (3, 8, 0x02),
+        (6, 8, 0x02),
+        (10, 8, 0x02),
+        (15, 8, 0x02),
+        (24, 8, 0x02),
+        (31, 8, 0x02),
+        (41, 8, 0x02),
+        (56, 8, 0x03),
+        (3, 11, 0x02),
+        (6, 11, 0x02),
+        (10, 11, 0x02),
+        (15, 11, 0x02),
+        (24, 11, 0x02),
+        (31, 11, 0x02),
+        (41, 11, 0x02),
+        (56, 11, 0x03),
+    ],
+    // 234
+    [
+        (3, 12, 0x02),
+        (6, 12, 0x02),
+        (10, 12, 0x02),
+        (15, 12, 0x02),
+        (24, 12, 0x02),
+        (31, 12, 0x02),
+        (41, 12, 0x02),
+        (56, 12, 0x03),
+        (3, 14, 0x02),
+        (6, 14, 0x02),
+        (10, 14, 0x02),
+        (15, 14, 0x02),
+        (24, 14, 0x02),
+        (31, 14, 0x02),
+        (41, 14, 0x02),
+        (56, 14, 0x03),
+    ],
+    // 235
+    [
+        (2, 15, 0x02),
+        (9, 15, 0x02),
+        (23, 15, 0x02),
+        (40, 15, 0x03),
+        (2, 16, 0x02),
+        (9, 16, 0x02),
+        (23, 16, 0x02),
+        (40, 16, 0x03),
+        (2, 17, 0x02),
+        (9, 17, 0x02),
+        (23, 17, 0x02),
+        (40, 17, 0x03),
+        (2, 18, 0x02),
+        (9, 18, 0x02),
+        (23, 18, 0x02),
+        (40, 18, 0x03),
+    ],
+    // 236
+    [
+        (3, 15, 0x02),
+        (6, 15, 0x02),
+        (10, 15, 0x02),
+        (15, 15, 0x02),
+        (24, 15, 0x02),
+        (31, 15, 0x02),
+        (41, 15, 0x02),
+        (56, 15, 0x03),
+        (3, 16, 0x02),
+        (6, 16, 0x02),
+        (10, 16, 0x02),
+        (15, 16, 0x02),
+        (24, 16, 0x02),
+        (31, 16, 0x02),
+        (41, 16, 0x02),
+        (56, 16, 0x03),
+    ],
+    // 237
+    [
+        (3, 17, 0x02),
+        (6, 17, 0x02),
+        (10, 17, 0x02),
+        (15, 17, 0x02),
+        (24, 17, 0x02),
+        (31, 17, 0x02),
+        (41, 17, 0x02),
+        (56, 17, 0x03),
+        (3, 18, 0x02),
+        (6, 18, 0x02),
+        (10, 18, 0x02),
+        (15, 18, 0x02),
+        (24, 18, 0x02),
+        (31, 18, 0x02),
+        (41, 18, 0x02),
+        (56, 18, 0x03),
+    ],
+    // 238
+    [
+        (0, 19, 0x02),
+        (0, 20, 0x02),
+        (0, 21, 0x02),
+        (0, 23, 0x02),
+        (0, 24, 0x02),
+        (0, 25, 0x02),
+        (0, 26, 0x02),
+        (0, 27, 0x02),
+        (0, 28, 0x02),
+        (0, 29, 0x02),
+        (0, 30, 0x02),
+        (0, 31, 0x02),
+        (0, 127, 0x02),
+        (0, 220, 0x02),
+        (0, 249, 0x02),
+        (253, 0, 0x00),
+    ],
+    // 239
+    [
+        (1, 19, 0x02),
+        (22, 19, 0x03),
+        (1, 20, 0x02),
+        (22, 20, 0x03),
+        (1, 21, 0x02),
+        (22, 21, 0x03),
+        (1, 23, 0x02),
+        (22, 23, 0x03),
+        (1, 24, 0x02),
+        (22, 24, 0x03),
+        (1, 25, 0x02),
+        (22, 25, 0x03),
+        (1, 26, 0x02),
+        (22, 26, 0x03),
+        (1, 27, 0x02),
+        (22, 27, 0x03),
+    ],
+    // 240
+    [
+        (2, 19, 0x02),
+        (9, 19, 0x02),
+        (23, 19, 0x02),
+        (40, 19, 0x03),
+        (2, 20, 0x02),
+        (9, 20, 0x02),
+        (23, 20, 0x02),
+        (40, 20, 0x03),
+        (2, 21, 0x02),
+        (9, 21, 0x02),
+        (23, 21, 0x02),
+        (40, 21, 0x03),
+        (2, 23, 0x02),
+        (9, 23, 0x02),
+        (23, 23, 0x02),
+        (40, 23, 0x03),
+    ],
+    // 241
+    [
+        (3, 19, 0x02),
+        (6, 19, 0x02),
+        (10, 19, 0x02),
+        (15, 19, 0x02),
+        (24, 19, 0x02),
+        (31, 19, 0x02),
+        (41, 19, 0x02),
+        (56, 19, 0x03),
+        (3, 20, 0x02),
+        (6, 20, 0x02),
+        (10, 20, 0x02),
+        (15, 20, 0x02),
+        (24, 20, 0x02),
+        (31, 20, 0x02),
+        (41, 20, 0x02),
+        (56, 20, 0x03),
+    ],
+    // 242
+    [
+        (3, 21, 0x02),
+        (6, 21, 0x02),
+        (10, 21, 0x02),
+        (15, 21, 0x02),
+        (24, 21, 0x02),
+        (31, 21, 0x02),
+        (41, 21, 0x02),
+        (56, 21, 0x03),
+        (3, 23, 0x02),
+        (6, 23, 0x02),
+        (10, 23, 0x02),
+        (15, 23, 0x02),
+        (24, 23, 0x02),
+        (31, 23, 0x02),
+        (41, 23, 0x02),
+        (56, 23, 0x03),
+    ],
+    // 243
+    [
+        (2, 24, 0x02),
+        (9, 24, 0x02),
+        (23, 24, 0x02),
+        (40, 24, 0x03),
+        (2, 25, 0x02),
+        (9, 25, 0x02),
+        (23, 25, 0x02),
+        (40, 25, 0x03),
+        (2, 26, 0x02),
+        (9, 26, 0x02),
+        (23, 26, 0x02),
+        (40, 26, 0x03),
+        (2, 27, 0x02),
+        (9, 27, 0x02),
+        (23, 27, 0x02),
+        (40, 27, 0x03),
+    ],
+    // 244
+    [
+        (3, 24, 0x02),
+        (6, 24, 0x02),
+        (10, 24, 0x02),
+        (15, 24, 0x02),
+        (24, 24, 0x02),
+        (31, 24, 0x02),
+        (41, 24, 0x02),
+        (56, 24, 0x03),
+        (3, 25, 0x02),
+        (6, 25, 0x02),
+        (10, 25, 0x02),
+        (15, 25, 0x02),
+        (24, 25, 0x02),
+        (31, 25, 0x02),
+        (41, 25, 0x02),
+        (56, 25, 0x03),
+    ],
+    // 245
+    [
+        (3, 26, 0x02),
+        (6, 26, 0x02),
+        (10, 26, 0x02),
+        (15, 26, 0x02),
+        (24, 26, 0x02),
+        (31, 26, 0x02),
+        (41, 26, 0x02),
+        (56, 26, 0x03),
+        (3, 27, 0x02),
+        (6, 27, 0x02),
+        (10, 27, 0x02),
+        (15, 27, 0x02),
+        (24, 27, 0x02),
+        (31, 27, 0x02),
+        (41, 27, 0x02),
+        (56, 27, 0x03),
+    ],
+    // 246
+    [
+        (1, 28, 0x02),
+        (22, 28, 0x03),
+        (1, 29, 0x02),
+        (22, 29, 0x03),
+        (1, 30, 0x02),
+        (22, 30, 0x03),
+        (1, 31, 0x02),
+        (22, 31, 0x03),
+        (1, 127, 0x02),
+        (22, 127, 0x03),
+        (1, 220, 0x02),
+        (22, 220, 0x03),
+        (1, 249, 0x02),
+        (22, 249, 0x03),
+        (254, 0, 0x00),
+        (255, 0, 0x00),
+    ],
+    // 247
+    [
+        (2, 28, 0x02),
+        (9, 28, 0x02),
+        (23, 28, 0x02),
+        (40, 28, 0x03),
+        (2, 29, 0x02),
+        (9, 29, 0x02),
+        (23, 29, 0x02),
+        (40, 29, 0x03),
+        (2, 30, 0x02),
+        (9, 30, 0x02),
+        (23, 30, 0x02),
+        (40, 30, 0x03),
+        (2, 31, 0x02),
+        (9, 31, 0x02),
+        (23, 31, 0x02),
+        (40, 31, 0x03),
+    ],
+    // 248
+    [
+        (3, 28, 0x02),
+        (6, 28, 0x02),
+        (10, 28, 0x02),
+        (15, 28, 0x02),
+        (24, 28, 0x02),
+        (31, 28, 0x02),
+        (41, 28, 0x02),
+        (56, 28, 0x03),
+        (3, 29, 0x02),
+        (6, 29, 0x02),
+        (10, 29, 0x02),
+        (15, 29, 0x02),
+        (24, 29, 0x02),
+        (31, 29, 0x02),
+        (41, 29, 0x02),
+        (56, 29, 0x03),
+    ],
+    // 249
+    [
+        (3, 30, 0x02),
+        (6, 30, 0x02),
+        (10, 30, 0x02),
+        (15, 30, 0x02),
+        (24, 30, 0x02),
+        (31, 30, 0x02),
+        (41, 30, 0x02),
+        (56, 30, 0x03),
+        (3, 31, 0x02),
+        (6, 31, 0x02),
+        (10, 31, 0x02),
+        (15, 31, 0x02),
+        (24, 31, 0x02),
+        (31, 31, 0x02),
+        (41, 31, 0x02),
+        (56, 31, 0x03),
+    ],
+    // 250
+    [
+        (2, 127, 0x02),
+        (9, 127, 0x02),
+        (23, 127, 0x02),
+        (40, 127, 0x03),
+        (2, 220, 0x02),
+        (9, 220, 0x02),
+        (23, 220, 0x02),
+        (40, 220, 0x03),
+        (2, 249, 0x02),
+        (9, 249, 0x02),
+        (23, 249, 0x02),
+        (40, 249, 0x03),
+        (0, 10, 0x02),
+        (0, 13, 0x02),
+        (0, 22, 0x02),
+        (0, 0, 0x04),
+    ],
+    // 251
+    [
+        (3, 127, 0x02),
+        (6, 127, 0x02),
+        (10, 127, 0x02),
+        (15, 127, 0x02),
+        (24, 127, 0x02),
+        (31, 127, 0x02),
+        (41, 127, 0x02),
+        (56, 127, 0x03),
+        (3, 220, 0x02),
+        (6, 220, 0x02),
+        (10, 220, 0x02),
+        (15, 220, 0x02),
+        (24, 220, 0x02),
+        (31, 220, 0x02),
+        (41, 220, 0x02),
+        (56, 220, 0x03),
+    ],
+    // 252
+    [
+        (3, 249, 0x02),
+        (6, 249, 0x02),
+        (10, 249, 0x02),
+        (15, 249, 0x02),
+        (24, 249, 0x02),
+        (31, 249, 0x02),
+        (41, 249, 0x02),
+        (56, 249, 0x03),
+        (1, 10, 0x02),
+        (22, 10, 0x03),
+        (1, 13, 0x02),
+        (22, 13, 0x03),
+        (1, 22, 0x02),
+        (22, 22, 0x03),
+        (0, 0, 0x04),
+        (0, 0, 0x05),
+    ],
+    // 253
+    [
+        (2, 10, 0x02),
+        (9, 10, 0x02),
+        (23, 10, 0x02),
+        (40, 10, 0x03),
+        (2, 13, 0x02),
+        (9, 13, 0x02),
+        (23, 13, 0x02),
+        (40, 13, 0x03),
+        (2, 22, 0x02),
+        (9, 22, 0x02),
+        (23, 22, 0x02),
+        (40, 22, 0x03),
+        (0, 0, 0x04),
+        (0, 0, 0x04),
+        (0, 0, 0x04),
+        (0, 0, 0x05),
+    ],
+    // 254
+    [
+        (3, 10, 0x02),
+        (6, 10, 0x02),
+        (10, 10, 0x02),
+        (15, 10, 0x02),
+        (24, 10, 0x02),
+        (31, 10, 0x02),
+        (41, 10, 0x02),
+        (56, 10, 0x03),
+        (3, 13, 0x02),
+        (6, 13, 0x02),
+        (10, 13, 0x02),
+        (15, 13, 0x02),
+        (24, 13, 0x02),
+        (31, 13, 0x02),
+        (41, 13, 0x02),
+        (56, 13, 0x03),
+    ],
+    // 255
+    [
+        (3, 22, 0x02),
+        (6, 22, 0x02),
+        (10, 22, 0x02),
+        (15, 22, 0x02),
+        (24, 22, 0x02),
+        (31, 22, 0x02),
+        (41, 22, 0x02),
+        (56, 22, 0x03),
+        (0, 0, 0x04),
+        (0, 0, 0x04),
+        (0, 0, 0x04),
+        (0, 0, 0x04),
+        (0, 0, 0x04),
+        (0, 0, 0x04),
+        (0, 0, 0x04),
+        (0, 0, 0x05),
+    ],
+];
diff --git a/src/hpack/mod.rs b/src/hpack/mod.rs
new file mode 100644
index 0000000..12c75d5
--- /dev/null
+++ b/src/hpack/mod.rs
@@ -0,0 +1,12 @@
+mod decoder;
+mod encoder;
+pub(crate) mod header;
+pub(crate) mod huffman;
+mod table;
+
+#[cfg(test)]
+mod test;
+
+pub use self::decoder::{Decoder, DecoderError, NeedMore};
+pub use self::encoder::Encoder;
+pub use self::header::{BytesStr, Header};
diff --git a/src/hpack/table.rs b/src/hpack/table.rs
new file mode 100644
index 0000000..3e45f41
--- /dev/null
+++ b/src/hpack/table.rs
@@ -0,0 +1,766 @@
+use super::Header;
+
+use fnv::FnvHasher;
+use http::header;
+use http::method::Method;
+
+use std::collections::VecDeque;
+use std::hash::{Hash, Hasher};
+use std::{cmp, mem, usize};
+
+/// HPACK encoder table
+#[derive(Debug)]
+pub struct Table {
+    mask: usize,
+    indices: Vec<Option<Pos>>,
+    slots: VecDeque<Slot>,
+    inserted: usize,
+    // Size is in bytes
+    size: usize,
+    max_size: usize,
+}
+
+#[derive(Debug)]
+pub enum Index {
+    // The header is already fully indexed
+    Indexed(usize, Header),
+
+    // The name is indexed, but not the value
+    Name(usize, Header),
+
+    // The full header has been inserted into the table.
+    Inserted(usize),
+
+    // Only the value has been inserted (hpack table idx, slots idx)
+    InsertedValue(usize, usize),
+
+    // The header is not indexed by this table
+    NotIndexed(Header),
+}
+
+#[derive(Debug)]
+struct Slot {
+    hash: HashValue,
+    header: Header,
+    next: Option<usize>,
+}
+
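+// Note: `Pos::index` is stored relative to the running insertion count: a slot
+// is recorded as `0usize.wrapping_sub(inserted)` when it is inserted, so
+// `index.wrapping_add(self.inserted)` always yields its current position in
+// `slots`, even after newer entries are pushed to the front.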
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+struct Pos {
+    index: usize,
+    hash: HashValue,
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+struct HashValue(usize);
+
+const MAX_SIZE: usize = 1 << 16;
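+// The HPACK static table occupies indices 1..=61 (RFC 7541), so dynamic-table
+// entries are exposed to callers at `slot position + DYN_OFFSET`.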
+const DYN_OFFSET: usize = 62;
+
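+// Iterates `$probe_var` over every bucket in the index array, wrapping back to
+// 0 at the end; the body is expected to `break` or `return` once it finds the
+// bucket it is looking for.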
+macro_rules! probe_loop {
+    ($probe_var: ident < $len: expr, $body: expr) => {
+        debug_assert!($len > 0);
+        loop {
+            if $probe_var < $len {
+                $body
+                $probe_var += 1;
+            } else {
+                $probe_var = 0;
+            }
+        }
+    };
+}
+
+impl Table {
+    pub fn new(max_size: usize, capacity: usize) -> Table {
+        if capacity == 0 {
+            Table {
+                mask: 0,
+                indices: vec![],
+                slots: VecDeque::new(),
+                inserted: 0,
+                size: 0,
+                max_size,
+            }
+        } else {
+            let capacity = cmp::max(to_raw_capacity(capacity).next_power_of_two(), 8);
+
+            Table {
+                mask: capacity.wrapping_sub(1),
+                indices: vec![None; capacity],
+                slots: VecDeque::with_capacity(usable_capacity(capacity)),
+                inserted: 0,
+                size: 0,
+                max_size,
+            }
+        }
+    }
+
+    #[inline]
+    pub fn capacity(&self) -> usize {
+        usable_capacity(self.indices.len())
+    }
+
+    pub fn max_size(&self) -> usize {
+        self.max_size
+    }
+
+    /// Gets the header stored in the table
+    pub fn resolve<'a>(&'a self, index: &'a Index) -> &'a Header {
+        use self::Index::*;
+
+        match *index {
+            Indexed(_, ref h) => h,
+            Name(_, ref h) => h,
+            Inserted(idx) => &self.slots[idx].header,
+            InsertedValue(_, idx) => &self.slots[idx].header,
+            NotIndexed(ref h) => h,
+        }
+    }
+
+    pub fn resolve_idx(&self, index: &Index) -> usize {
+        use self::Index::*;
+
+        match *index {
+            Indexed(idx, ..) => idx,
+            Name(idx, ..) => idx,
+            Inserted(idx) => idx + DYN_OFFSET,
+            InsertedValue(_name_idx, slot_idx) => slot_idx + DYN_OFFSET,
+            NotIndexed(_) => panic!("cannot resolve index"),
+        }
+    }
+
+    /// Index the header in the HPACK table.
+    pub fn index(&mut self, header: Header) -> Index {
+        // Check the static table
+        let statik = index_static(&header);
+
+        // Don't index certain headers. This logic is borrowed from nghttp2.
+        if header.skip_value_index() {
+            // Right now, if this is true, the header name is always in the
+            // static table. At some point in the future, this might not be true
+            // and this logic will need to be updated.
+            debug_assert!(statik.is_some(), "skip_value_index requires a static name",);
+            return Index::new(statik, header);
+        }
+
+        // If the header is already indexed by the static table, return that
+        if let Some((n, true)) = statik {
+            return Index::Indexed(n, header);
+        }
+
+        // Don't index large headers (anything over 3/4 of the table's max size)
+        if header.len() * 4 > self.max_size * 3 {
+            return Index::new(statik, header);
+        }
+
+        self.index_dynamic(header, statik)
+    }
+
+    fn index_dynamic(&mut self, header: Header, statik: Option<(usize, bool)>) -> Index {
+        debug_assert!(self.assert_valid_state("one"));
+
+        if header.len() + self.size < self.max_size || !header.is_sensitive() {
+            // Only grow internal storage if needed
+            self.reserve_one();
+        }
+
+        if self.indices.is_empty() {
+            // If `indices` is not empty, then it is impossible for all
+            // `indices` entries to be `Some`. So, we only need to check for the
+            // empty case.
+            return Index::new(statik, header);
+        }
+
+        let hash = hash_header(&header);
+
+        let desired_pos = desired_pos(self.mask, hash);
+        let mut probe = desired_pos;
+        let mut dist = 0;
+
+        // Start at the ideal position, checking all slots
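+        // `dist` tracks how far we have probed past our ideal bucket; occupants
+        // record the same measure via `probe_distance`.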
+        probe_loop!(probe < self.indices.len(), {
+            if let Some(pos) = self.indices[probe] {
+                // The slot is already occupied, but check if it has a lower
+                // displacement.
+                let their_dist = probe_distance(self.mask, pos.hash, probe);
+
+                let slot_idx = pos.index.wrapping_add(self.inserted);
+
+                if their_dist < dist {
+                    // Robin Hood: the occupant is less displaced than we are, so claim its bucket
+                    return self.index_vacant(header, hash, dist, probe, statik);
+                } else if pos.hash == hash && self.slots[slot_idx].header.name() == header.name() {
+                    // Matching name, check values
+                    return self.index_occupied(header, hash, pos.index, statik.map(|(n, _)| n));
+                }
+            } else {
+                return self.index_vacant(header, hash, dist, probe, statik);
+            }
+
+            dist += 1;
+        });
+    }
+
+    fn index_occupied(
+        &mut self,
+        header: Header,
+        hash: HashValue,
+        mut index: usize,
+        statik: Option<usize>,
+    ) -> Index {
+        debug_assert!(self.assert_valid_state("top"));
+
+        // There is already a match for the given header name. Check whether a
+        // value matches as well. The header will only be inserted if the table
+        // is not at capacity.
+        loop {
+            // Compute the real index into the VecDeque
+            let real_idx = index.wrapping_add(self.inserted);
+
+            if self.slots[real_idx].header.value_eq(&header) {
+                // We have a full match!
+                return Index::Indexed(real_idx + DYN_OFFSET, header);
+            }
+
+            if let Some(next) = self.slots[real_idx].next {
+                index = next;
+                continue;
+            }
+
+            if header.is_sensitive() {
+                // Should we assert this?
+                // debug_assert!(statik.is_none());
+                return Index::Name(real_idx + DYN_OFFSET, header);
+            }
+
+            self.update_size(header.len(), Some(index));
+
+            // Insert the new header
+            self.insert(header, hash);
+
+            // Recompute real_idx as it just changed.
+            let new_real_idx = index.wrapping_add(self.inserted);
+
+            // The previous node in the linked list may have gotten evicted
+            // while making room for this header.
+            if new_real_idx < self.slots.len() {
+                let idx = 0usize.wrapping_sub(self.inserted);
+
+                self.slots[new_real_idx].next = Some(idx);
+            }
+
+            debug_assert!(self.assert_valid_state("bottom"));
+
+            // Even if the previous header was evicted, we can still reference
+            // it when inserting the new one...
+            return if let Some(n) = statik {
+                // If name is in static table, use it instead
+                Index::InsertedValue(n, 0)
+            } else {
+                Index::InsertedValue(real_idx + DYN_OFFSET, 0)
+            };
+        }
+    }
+
+    fn index_vacant(
+        &mut self,
+        header: Header,
+        hash: HashValue,
+        mut dist: usize,
+        mut probe: usize,
+        statik: Option<(usize, bool)>,
+    ) -> Index {
+        if header.is_sensitive() {
+            return Index::new(statik, header);
+        }
+
+        debug_assert!(self.assert_valid_state("top"));
+        debug_assert!(dist == 0 || self.indices[probe.wrapping_sub(1) & self.mask].is_some());
+
+        // Passing in `None` for prev_idx since there is no previous
+        // header in this case.
+        if self.update_size(header.len(), None) {
+            while dist != 0 {
+                let back = probe.wrapping_sub(1) & self.mask;
+
+                if let Some(pos) = self.indices[back] {
+                    let their_dist = probe_distance(self.mask, pos.hash, back);
+
+                    if their_dist < (dist - 1) {
+                        probe = back;
+                        dist -= 1;
+                    } else {
+                        break;
+                    }
+                } else {
+                    probe = back;
+                    dist -= 1;
+                }
+            }
+        }
+
+        debug_assert!(self.assert_valid_state("after update"));
+
+        self.insert(header, hash);
+
+        let pos_idx = 0usize.wrapping_sub(self.inserted);
+
+        let prev = mem::replace(
+            &mut self.indices[probe],
+            Some(Pos {
+                index: pos_idx,
+                hash,
+            }),
+        );
+
+        if let Some(mut prev) = prev {
+            // Shift forward
+            let mut probe = probe + 1;
+
+            probe_loop!(probe < self.indices.len(), {
+                let pos = &mut self.indices[probe];
+
+                prev = match mem::replace(pos, Some(prev)) {
+                    Some(p) => p,
+                    None => break,
+                };
+            });
+        }
+
+        debug_assert!(self.assert_valid_state("bottom"));
+
+        if let Some((n, _)) = statik {
+            Index::InsertedValue(n, 0)
+        } else {
+            Index::Inserted(0)
+        }
+    }
+
+    fn insert(&mut self, header: Header, hash: HashValue) {
+        self.inserted = self.inserted.wrapping_add(1);
+
+        self.slots.push_front(Slot {
+            hash,
+            header,
+            next: None,
+        });
+    }
+
+    pub fn resize(&mut self, size: usize) {
+        self.max_size = size;
+
+        if size == 0 {
+            self.size = 0;
+
+            for i in &mut self.indices {
+                *i = None;
+            }
+
+            self.slots.clear();
+            self.inserted = 0;
+        } else {
+            self.converge(None);
+        }
+    }
+
+    fn update_size(&mut self, len: usize, prev_idx: Option<usize>) -> bool {
+        self.size += len;
+        self.converge(prev_idx)
+    }
+
+    fn converge(&mut self, prev_idx: Option<usize>) -> bool {
+        let mut ret = false;
+
+        while self.size > self.max_size {
+            ret = true;
+            self.evict(prev_idx);
+        }
+
+        ret
+    }
+
+    fn evict(&mut self, prev_idx: Option<usize>) {
+        let pos_idx = (self.slots.len() - 1).wrapping_sub(self.inserted);
+
+        debug_assert!(!self.slots.is_empty());
+        debug_assert!(self.assert_valid_state("one"));
+
+        // Remove the header
+        let slot = self.slots.pop_back().unwrap();
+        let mut probe = desired_pos(self.mask, slot.hash);
+
+        // Update the size
+        self.size -= slot.header.len();
+
+        debug_assert_eq!(
+            self.indices
+                .iter()
+                .filter_map(|p| *p)
+                .filter(|p| p.index == pos_idx)
+                .count(),
+            1
+        );
+
+        // Find the associated position
+        probe_loop!(probe < self.indices.len(), {
+            debug_assert!(self.indices[probe].is_some());
+
+            let mut pos = self.indices[probe].unwrap();
+
+            if pos.index == pos_idx {
+                if let Some(idx) = slot.next {
+                    pos.index = idx;
+                    self.indices[probe] = Some(pos);
+                } else if Some(pos.index) == prev_idx {
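+                    // The evicted slot is the one the caller is chaining from
+                    // (`prev_idx`); point this bucket at the header that is
+                    // about to be inserted (hence `inserted + 1`).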
+                    pos.index = 0usize.wrapping_sub(self.inserted + 1);
+                    self.indices[probe] = Some(pos);
+                } else {
+                    self.indices[probe] = None;
+                    self.remove_phase_two(probe);
+                }
+
+                break;
+            }
+        });
+
+        debug_assert!(self.assert_valid_state("two"));
+    }
+
+    // Shifts all indices that were displaced by the header that has just been
+    // removed.
+    fn remove_phase_two(&mut self, probe: usize) {
+        let mut last_probe = probe;
+        let mut probe = probe + 1;
+
+        probe_loop!(probe < self.indices.len(), {
+            if let Some(pos) = self.indices[probe] {
+                if probe_distance(self.mask, pos.hash, probe) > 0 {
+                    self.indices[last_probe] = self.indices[probe].take();
+                } else {
+                    break;
+                }
+            } else {
+                break;
+            }
+
+            last_probe = probe;
+        });
+
+        debug_assert!(self.assert_valid_state("two"));
+    }
+
+    fn reserve_one(&mut self) {
+        let len = self.slots.len();
+
+        if len == self.capacity() {
+            if len == 0 {
+                let new_raw_cap = 8;
+                self.mask = 8 - 1;
+                self.indices = vec![None; new_raw_cap];
+            } else {
+                let raw_cap = self.indices.len();
+                self.grow(raw_cap << 1);
+            }
+        }
+    }
+
+    #[inline]
+    fn grow(&mut self, new_raw_cap: usize) {
+        // This path can never be reached when handling the first allocation in
+        // the map.
+
+        debug_assert!(self.assert_valid_state("top"));
+
+        // find first ideally placed element -- start of cluster
+        let mut first_ideal = 0;
+
+        for (i, pos) in self.indices.iter().enumerate() {
+            if let Some(pos) = *pos {
+                if 0 == probe_distance(self.mask, pos.hash, i) {
+                    first_ideal = i;
+                    break;
+                }
+            }
+        }
+
+        // visit the entries in an order where we can simply reinsert them
+        // into self.indices without any bucket stealing.
+        let old_indices = mem::replace(&mut self.indices, vec![None; new_raw_cap]);
+        self.mask = new_raw_cap.wrapping_sub(1);
+
+        for &pos in &old_indices[first_ideal..] {
+            self.reinsert_entry_in_order(pos);
+        }
+
+        for &pos in &old_indices[..first_ideal] {
+            self.reinsert_entry_in_order(pos);
+        }
+
+        debug_assert!(self.assert_valid_state("bottom"));
+    }
+
+    fn reinsert_entry_in_order(&mut self, pos: Option<Pos>) {
+        if let Some(pos) = pos {
+            // Find first empty bucket and insert there
+            let mut probe = desired_pos(self.mask, pos.hash);
+
+            probe_loop!(probe < self.indices.len(), {
+                if self.indices[probe].is_none() {
+                    // empty bucket, insert here
+                    self.indices[probe] = Some(pos);
+                    return;
+                }
+
+                debug_assert!({
+                    let them = self.indices[probe].unwrap();
+                    let their_distance = probe_distance(self.mask, them.hash, probe);
+                    let our_distance = probe_distance(self.mask, pos.hash, probe);
+
+                    their_distance >= our_distance
+                });
+            });
+        }
+    }
+
+    #[cfg(not(test))]
+    fn assert_valid_state(&self, _: &'static str) -> bool {
+        true
+    }
+
+    #[cfg(test)]
+    fn assert_valid_state(&self, _msg: &'static str) -> bool {
+        /*
+            // Checks that the internal map structure is valid
+            //
+            // Ensure all hash codes in indices match the associated slot
+            for pos in &self.indices {
+                if let Some(pos) = *pos {
+                    let real_idx = pos.index.wrapping_add(self.inserted);
+
+                    if real_idx.wrapping_add(1) != 0 {
+                        assert!(real_idx < self.slots.len(),
+                                "out of index; real={}; len={}, msg={}",
+                                real_idx, self.slots.len(), msg);
+
+                        assert_eq!(pos.hash, self.slots[real_idx].hash,
+                                   "index hash does not match slot; msg={}", msg);
+                    }
+                }
+            }
+
+            // Every index is only available once
+            for i in 0..self.indices.len() {
+                if self.indices[i].is_none() {
+                    continue;
+                }
+
+                for j in i+1..self.indices.len() {
+                    assert_ne!(self.indices[i], self.indices[j],
+                                "duplicate indices; msg={}", msg);
+                }
+            }
+
+            for (index, slot) in self.slots.iter().enumerate() {
+                let mut indexed = None;
+
+                // First, see if the slot is indexed
+                for (i, pos) in self.indices.iter().enumerate() {
+                    if let Some(pos) = *pos {
+                        let real_idx = pos.index.wrapping_add(self.inserted);
+                        if real_idx == index {
+                            indexed = Some(i);
+                            // Already know that there is no dup, so break
+                            break;
+                        }
+                    }
+                }
+
+                if let Some(actual) = indexed {
+                    // Ensure that it is accessible..
+                    let desired = desired_pos(self.mask, slot.hash);
+                    let mut probe = desired;
+                    let mut dist = 0;
+
+                    probe_loop!(probe < self.indices.len(), {
+                        assert!(self.indices[probe].is_some(),
+                                "unexpected empty slot; probe={}; hash={:?}; msg={}",
+                                probe, slot.hash, msg);
+
+                        let pos = self.indices[probe].unwrap();
+
+                        let their_dist = probe_distance(self.mask, pos.hash, probe);
+                        let real_idx = pos.index.wrapping_add(self.inserted);
+
+                        if real_idx == index {
+                            break;
+                        }
+
+                        assert!(dist <= their_dist,
+                                "could not find entry; actual={}; desired={}" +
+                                "probe={}, dist={}; their_dist={}; index={}; msg={}",
+                                actual, desired, probe, dist, their_dist,
+                                index.wrapping_sub(self.inserted), msg);
+
+                        dist += 1;
+                    });
+                } else {
+                    // There is exactly one next link
+                    let cnt = self.slots.iter().map(|s| s.next)
+                        .filter(|n| *n == Some(index.wrapping_sub(self.inserted)))
+                        .count();
+
+                    assert_eq!(1, cnt, "more than one node pointing here; msg={}", msg);
+                }
+            }
+        */
+
+        // TODO: Ensure linked lists are correct: no cycles, etc...
+
+        true
+    }
+}
+
+#[cfg(test)]
+impl Table {
+    /// Returns the number of headers in the table
+    pub fn len(&self) -> usize {
+        self.slots.len()
+    }
+
+    /// Returns the table size
+    pub fn size(&self) -> usize {
+        self.size
+    }
+}
+
+impl Index {
+    fn new(v: Option<(usize, bool)>, e: Header) -> Index {
+        match v {
+            None => Index::NotIndexed(e),
+            Some((n, true)) => Index::Indexed(n, e),
+            Some((n, false)) => Index::Name(n, e),
+        }
+    }
+}
+
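+// The index array is kept at most 3/4 full: `usable_capacity` is how many
+// entries a raw array of `cap` buckets may hold, and `to_raw_capacity` is its
+// approximate inverse.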
+#[inline]
+fn usable_capacity(cap: usize) -> usize {
+    cap - cap / 4
+}
+
+#[inline]
+fn to_raw_capacity(n: usize) -> usize {
+    n + n / 3
+}
+
+#[inline]
+fn desired_pos(mask: usize, hash: HashValue) -> usize {
+    hash.0 & mask
+}
+
+#[inline]
+fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize {
+    current.wrapping_sub(desired_pos(mask, hash)) & mask
+}
+
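+// Only the header *name* contributes to the hash; different values for the
+// same name land in one bucket and are chained through `Slot::next`.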
+fn hash_header(header: &Header) -> HashValue {
+    const MASK: u64 = (MAX_SIZE as u64) - 1;
+
+    let mut h = FnvHasher::default();
+    header.name().hash(&mut h);
+    HashValue((h.finish() & MASK) as usize)
+}
+
+/// Checks the static table for the header. If found, returns the index and a
+/// boolean indicating whether the value matched as well.
+fn index_static(header: &Header) -> Option<(usize, bool)> {
+    match *header {
+        Header::Field {
+            ref name,
+            ref value,
+        } => match *name {
+            header::ACCEPT_CHARSET => Some((15, false)),
+            header::ACCEPT_ENCODING => {
+                if value == "gzip, deflate" {
+                    Some((16, true))
+                } else {
+                    Some((16, false))
+                }
+            }
+            header::ACCEPT_LANGUAGE => Some((17, false)),
+            header::ACCEPT_RANGES => Some((18, false)),
+            header::ACCEPT => Some((19, false)),
+            header::ACCESS_CONTROL_ALLOW_ORIGIN => Some((20, false)),
+            header::AGE => Some((21, false)),
+            header::ALLOW => Some((22, false)),
+            header::AUTHORIZATION => Some((23, false)),
+            header::CACHE_CONTROL => Some((24, false)),
+            header::CONTENT_DISPOSITION => Some((25, false)),
+            header::CONTENT_ENCODING => Some((26, false)),
+            header::CONTENT_LANGUAGE => Some((27, false)),
+            header::CONTENT_LENGTH => Some((28, false)),
+            header::CONTENT_LOCATION => Some((29, false)),
+            header::CONTENT_RANGE => Some((30, false)),
+            header::CONTENT_TYPE => Some((31, false)),
+            header::COOKIE => Some((32, false)),
+            header::DATE => Some((33, false)),
+            header::ETAG => Some((34, false)),
+            header::EXPECT => Some((35, false)),
+            header::EXPIRES => Some((36, false)),
+            header::FROM => Some((37, false)),
+            header::HOST => Some((38, false)),
+            header::IF_MATCH => Some((39, false)),
+            header::IF_MODIFIED_SINCE => Some((40, false)),
+            header::IF_NONE_MATCH => Some((41, false)),
+            header::IF_RANGE => Some((42, false)),
+            header::IF_UNMODIFIED_SINCE => Some((43, false)),
+            header::LAST_MODIFIED => Some((44, false)),
+            header::LINK => Some((45, false)),
+            header::LOCATION => Some((46, false)),
+            header::MAX_FORWARDS => Some((47, false)),
+            header::PROXY_AUTHENTICATE => Some((48, false)),
+            header::PROXY_AUTHORIZATION => Some((49, false)),
+            header::RANGE => Some((50, false)),
+            header::REFERER => Some((51, false)),
+            header::REFRESH => Some((52, false)),
+            header::RETRY_AFTER => Some((53, false)),
+            header::SERVER => Some((54, false)),
+            header::SET_COOKIE => Some((55, false)),
+            header::STRICT_TRANSPORT_SECURITY => Some((56, false)),
+            header::TRANSFER_ENCODING => Some((57, false)),
+            header::USER_AGENT => Some((58, false)),
+            header::VARY => Some((59, false)),
+            header::VIA => Some((60, false)),
+            header::WWW_AUTHENTICATE => Some((61, false)),
+            _ => None,
+        },
+        Header::Authority(_) => Some((1, false)),
+        Header::Method(ref v) => match *v {
+            Method::GET => Some((2, true)),
+            Method::POST => Some((3, true)),
+            _ => Some((2, false)),
+        },
+        Header::Scheme(ref v) => match &**v {
+            "http" => Some((6, true)),
+            "https" => Some((7, true)),
+            _ => Some((6, false)),
+        },
+        Header::Path(ref v) => match &**v {
+            "/" => Some((4, true)),
+            "/index.html" => Some((5, true)),
+            _ => Some((4, false)),
+        },
+        Header::Protocol(..) => None,
+        Header::Status(ref v) => match u16::from(*v) {
+            200 => Some((8, true)),
+            204 => Some((9, true)),
+            206 => Some((10, true)),
+            304 => Some((11, true)),
+            400 => Some((12, true)),
+            404 => Some((13, true)),
+            500 => Some((14, true)),
+            _ => Some((8, false)),
+        },
+    }
+}
diff --git a/src/hpack/test/fixture.rs b/src/hpack/test/fixture.rs
new file mode 100644
index 0000000..d3f76e3
--- /dev/null
+++ b/src/hpack/test/fixture.rs
@@ -0,0 +1,615 @@
+use crate::hpack::{Decoder, Encoder, Header};
+
+use bytes::BytesMut;
+use hex::FromHex;
+use serde_json::Value;
+
+use std::fs::File;
+use std::io::prelude::*;
+use std::io::Cursor;
+use std::path::Path;
+use std::str;
+
+fn test_fixture(path: &Path) {
+    let mut file = File::open(path).unwrap();
+    let mut data = String::new();
+    file.read_to_string(&mut data).unwrap();
+
+    let story: Value = serde_json::from_str(&data).unwrap();
+    test_story(story);
+}
+
+fn test_story(story: Value) {
+    let story = story.as_object().unwrap();
+
+    if let Some(cases) = story.get("cases") {
+        let mut cases: Vec<_> = cases
+            .as_array()
+            .unwrap()
+            .iter()
+            .map(|case| {
+                let case = case.as_object().unwrap();
+
+                let size = case
+                    .get("header_table_size")
+                    .map(|v| v.as_u64().unwrap() as usize);
+
+                let wire = case.get("wire").unwrap().as_str().unwrap();
+                let wire: Vec<u8> = FromHex::from_hex(wire.as_bytes()).unwrap();
+
+                let expect: Vec<_> = case
+                    .get("headers")
+                    .unwrap()
+                    .as_array()
+                    .unwrap()
+                    .iter()
+                    .map(|h| {
+                        let h = h.as_object().unwrap();
+                        let (name, val) = h.iter().next().unwrap();
+                        (name.clone(), val.as_str().unwrap().to_string())
+                    })
+                    .collect();
+
+                Case {
+                    seqno: case.get("seqno").unwrap().as_u64().unwrap(),
+                    wire,
+                    expect,
+                    header_table_size: size,
+                }
+            })
+            .collect();
+
+        cases.sort_by_key(|c| c.seqno);
+
+        let mut decoder = Decoder::default();
+
+        // First, check decoding against the fixtures
+        for case in &cases {
+            let mut expect = case.expect.clone();
+
+            if let Some(size) = case.header_table_size {
+                decoder.queue_size_update(size);
+            }
+
+            let mut buf = BytesMut::with_capacity(case.wire.len());
+            buf.extend_from_slice(&case.wire);
+            decoder
+                .decode(&mut Cursor::new(&mut buf), |e| {
+                    let (name, value) = expect.remove(0);
+                    assert_eq!(name, key_str(&e));
+                    assert_eq!(value, value_str(&e));
+                })
+                .unwrap();
+
+            assert_eq!(0, expect.len());
+        }
+
+        let mut encoder = Encoder::default();
+        let mut decoder = Decoder::default();
+
+        // Now, encode the headers
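+        // ... and round-trip them: encode with this crate's encoder and check
+        // that the decoder reproduces the exact same header list.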
+        for case in &cases {
+            let limit = 64 * 1024;
+            let mut buf = BytesMut::with_capacity(limit);
+
+            if let Some(size) = case.header_table_size {
+                encoder.update_max_size(size);
+                decoder.queue_size_update(size);
+            }
+
+            let mut input: Vec<_> = case
+                .expect
+                .iter()
+                .map(|(name, value)| {
+                    Header::new(name.clone().into(), value.clone().into())
+                        .unwrap()
+                        .into()
+                })
+                .collect();
+
+            encoder.encode(&mut input.clone().into_iter(), &mut buf);
+
+            decoder
+                .decode(&mut Cursor::new(&mut buf), |e| {
+                    assert_eq!(e, input.remove(0).reify().unwrap());
+                })
+                .unwrap();
+
+            assert_eq!(0, input.len());
+        }
+    }
+}
+
+struct Case {
+    seqno: u64,
+    wire: Vec<u8>,
+    expect: Vec<(String, String)>,
+    header_table_size: Option<usize>,
+}
+
+fn key_str(e: &Header) -> &str {
+    match *e {
+        Header::Field { ref name, .. } => name.as_str(),
+        Header::Authority(..) => ":authority",
+        Header::Method(..) => ":method",
+        Header::Scheme(..) => ":scheme",
+        Header::Path(..) => ":path",
+        Header::Protocol(..) => ":protocol",
+        Header::Status(..) => ":status",
+    }
+}
+
+fn value_str(e: &Header) -> &str {
+    match *e {
+        Header::Field { ref value, .. } => value.to_str().unwrap(),
+        Header::Authority(ref v) => v,
+        Header::Method(ref m) => m.as_str(),
+        Header::Scheme(ref v) => v,
+        Header::Path(ref v) => v,
+        Header::Protocol(ref v) => v.as_str(),
+        Header::Status(ref v) => v.as_str(),
+    }
+}
+
+macro_rules! fixture_mod {
+    ($module:ident => {
+        $(
+            ($fn:ident, $path:expr);
+        )+
+    }) => {
+        mod $module {
+            $(
+                #[test]
+                fn $fn() {
+                    let path = ::std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
+                        .join("fixtures/hpack")
+                        .join($path);
+
+                    super::test_fixture(path.as_ref());
+                }
+            )+
+        }
+    }
+}
+
+fixture_mod!(
+    haskell_http2_linear_huffman => {
+        (story_00, "haskell-http2-linear-huffman/story_00.json");
+        (story_01, "haskell-http2-linear-huffman/story_01.json");
+        (story_02, "haskell-http2-linear-huffman/story_02.json");
+        (story_03, "haskell-http2-linear-huffman/story_03.json");
+        (story_04, "haskell-http2-linear-huffman/story_04.json");
+        (story_05, "haskell-http2-linear-huffman/story_05.json");
+        (story_06, "haskell-http2-linear-huffman/story_06.json");
+        (story_07, "haskell-http2-linear-huffman/story_07.json");
+        (story_08, "haskell-http2-linear-huffman/story_08.json");
+        (story_09, "haskell-http2-linear-huffman/story_09.json");
+        (story_10, "haskell-http2-linear-huffman/story_10.json");
+        (story_11, "haskell-http2-linear-huffman/story_11.json");
+        (story_12, "haskell-http2-linear-huffman/story_12.json");
+        (story_13, "haskell-http2-linear-huffman/story_13.json");
+        (story_14, "haskell-http2-linear-huffman/story_14.json");
+        (story_15, "haskell-http2-linear-huffman/story_15.json");
+        (story_16, "haskell-http2-linear-huffman/story_16.json");
+        (story_17, "haskell-http2-linear-huffman/story_17.json");
+        (story_18, "haskell-http2-linear-huffman/story_18.json");
+        (story_19, "haskell-http2-linear-huffman/story_19.json");
+        (story_20, "haskell-http2-linear-huffman/story_20.json");
+        (story_21, "haskell-http2-linear-huffman/story_21.json");
+        (story_22, "haskell-http2-linear-huffman/story_22.json");
+        (story_23, "haskell-http2-linear-huffman/story_23.json");
+        (story_24, "haskell-http2-linear-huffman/story_24.json");
+        (story_25, "haskell-http2-linear-huffman/story_25.json");
+        (story_26, "haskell-http2-linear-huffman/story_26.json");
+        (story_27, "haskell-http2-linear-huffman/story_27.json");
+        (story_28, "haskell-http2-linear-huffman/story_28.json");
+        (story_29, "haskell-http2-linear-huffman/story_29.json");
+        (story_30, "haskell-http2-linear-huffman/story_30.json");
+        (story_31, "haskell-http2-linear-huffman/story_31.json");
+    }
+);
+
+fixture_mod!(
+    python_hpack => {
+        (story_00, "python-hpack/story_00.json");
+        (story_01, "python-hpack/story_01.json");
+        (story_02, "python-hpack/story_02.json");
+        (story_03, "python-hpack/story_03.json");
+        (story_04, "python-hpack/story_04.json");
+        (story_05, "python-hpack/story_05.json");
+        (story_06, "python-hpack/story_06.json");
+        (story_07, "python-hpack/story_07.json");
+        (story_08, "python-hpack/story_08.json");
+        (story_09, "python-hpack/story_09.json");
+        (story_10, "python-hpack/story_10.json");
+        (story_11, "python-hpack/story_11.json");
+        (story_12, "python-hpack/story_12.json");
+        (story_13, "python-hpack/story_13.json");
+        (story_14, "python-hpack/story_14.json");
+        (story_15, "python-hpack/story_15.json");
+        (story_16, "python-hpack/story_16.json");
+        (story_17, "python-hpack/story_17.json");
+        (story_18, "python-hpack/story_18.json");
+        (story_19, "python-hpack/story_19.json");
+        (story_20, "python-hpack/story_20.json");
+        (story_21, "python-hpack/story_21.json");
+        (story_22, "python-hpack/story_22.json");
+        (story_23, "python-hpack/story_23.json");
+        (story_24, "python-hpack/story_24.json");
+        (story_25, "python-hpack/story_25.json");
+        (story_26, "python-hpack/story_26.json");
+        (story_27, "python-hpack/story_27.json");
+        (story_28, "python-hpack/story_28.json");
+        (story_29, "python-hpack/story_29.json");
+        (story_30, "python-hpack/story_30.json");
+        (story_31, "python-hpack/story_31.json");
+    }
+);
+
+fixture_mod!(
+    nghttp2_16384_4096 => {
+        (story_00, "nghttp2-16384-4096/story_00.json");
+        (story_01, "nghttp2-16384-4096/story_01.json");
+        (story_02, "nghttp2-16384-4096/story_02.json");
+        (story_03, "nghttp2-16384-4096/story_03.json");
+        (story_04, "nghttp2-16384-4096/story_04.json");
+        (story_05, "nghttp2-16384-4096/story_05.json");
+        (story_06, "nghttp2-16384-4096/story_06.json");
+        (story_07, "nghttp2-16384-4096/story_07.json");
+        (story_08, "nghttp2-16384-4096/story_08.json");
+        (story_09, "nghttp2-16384-4096/story_09.json");
+        (story_10, "nghttp2-16384-4096/story_10.json");
+        (story_11, "nghttp2-16384-4096/story_11.json");
+        (story_12, "nghttp2-16384-4096/story_12.json");
+        (story_13, "nghttp2-16384-4096/story_13.json");
+        (story_14, "nghttp2-16384-4096/story_14.json");
+        (story_15, "nghttp2-16384-4096/story_15.json");
+        (story_16, "nghttp2-16384-4096/story_16.json");
+        (story_17, "nghttp2-16384-4096/story_17.json");
+        (story_18, "nghttp2-16384-4096/story_18.json");
+        (story_19, "nghttp2-16384-4096/story_19.json");
+        (story_20, "nghttp2-16384-4096/story_20.json");
+        (story_21, "nghttp2-16384-4096/story_21.json");
+        (story_22, "nghttp2-16384-4096/story_22.json");
+        (story_23, "nghttp2-16384-4096/story_23.json");
+        (story_24, "nghttp2-16384-4096/story_24.json");
+        (story_25, "nghttp2-16384-4096/story_25.json");
+        (story_26, "nghttp2-16384-4096/story_26.json");
+        (story_27, "nghttp2-16384-4096/story_27.json");
+        (story_28, "nghttp2-16384-4096/story_28.json");
+        (story_29, "nghttp2-16384-4096/story_29.json");
+        (story_30, "nghttp2-16384-4096/story_30.json");
+    }
+);
+
+fixture_mod!(
+    node_http2_hpack => {
+        (story_00, "node-http2-hpack/story_00.json");
+        (story_01, "node-http2-hpack/story_01.json");
+        (story_02, "node-http2-hpack/story_02.json");
+        (story_03, "node-http2-hpack/story_03.json");
+        (story_04, "node-http2-hpack/story_04.json");
+        (story_05, "node-http2-hpack/story_05.json");
+        (story_06, "node-http2-hpack/story_06.json");
+        (story_07, "node-http2-hpack/story_07.json");
+        (story_08, "node-http2-hpack/story_08.json");
+        (story_09, "node-http2-hpack/story_09.json");
+        (story_10, "node-http2-hpack/story_10.json");
+        (story_11, "node-http2-hpack/story_11.json");
+        (story_12, "node-http2-hpack/story_12.json");
+        (story_13, "node-http2-hpack/story_13.json");
+        (story_14, "node-http2-hpack/story_14.json");
+        (story_15, "node-http2-hpack/story_15.json");
+        (story_16, "node-http2-hpack/story_16.json");
+        (story_17, "node-http2-hpack/story_17.json");
+        (story_18, "node-http2-hpack/story_18.json");
+        (story_19, "node-http2-hpack/story_19.json");
+        (story_20, "node-http2-hpack/story_20.json");
+        (story_21, "node-http2-hpack/story_21.json");
+        (story_22, "node-http2-hpack/story_22.json");
+        (story_23, "node-http2-hpack/story_23.json");
+        (story_24, "node-http2-hpack/story_24.json");
+        (story_25, "node-http2-hpack/story_25.json");
+        (story_26, "node-http2-hpack/story_26.json");
+        (story_27, "node-http2-hpack/story_27.json");
+        (story_28, "node-http2-hpack/story_28.json");
+        (story_29, "node-http2-hpack/story_29.json");
+        (story_30, "node-http2-hpack/story_30.json");
+        (story_31, "node-http2-hpack/story_31.json");
+    }
+);
+
+fixture_mod!(
+    nghttp2_change_table_size => {
+        (story_00, "nghttp2-change-table-size/story_00.json");
+        (story_01, "nghttp2-change-table-size/story_01.json");
+        (story_02, "nghttp2-change-table-size/story_02.json");
+        (story_03, "nghttp2-change-table-size/story_03.json");
+        (story_04, "nghttp2-change-table-size/story_04.json");
+        (story_05, "nghttp2-change-table-size/story_05.json");
+        (story_06, "nghttp2-change-table-size/story_06.json");
+        (story_07, "nghttp2-change-table-size/story_07.json");
+        (story_08, "nghttp2-change-table-size/story_08.json");
+        (story_09, "nghttp2-change-table-size/story_09.json");
+        (story_10, "nghttp2-change-table-size/story_10.json");
+        (story_11, "nghttp2-change-table-size/story_11.json");
+        (story_12, "nghttp2-change-table-size/story_12.json");
+        (story_13, "nghttp2-change-table-size/story_13.json");
+        (story_14, "nghttp2-change-table-size/story_14.json");
+        (story_15, "nghttp2-change-table-size/story_15.json");
+        (story_16, "nghttp2-change-table-size/story_16.json");
+        (story_17, "nghttp2-change-table-size/story_17.json");
+        (story_18, "nghttp2-change-table-size/story_18.json");
+        (story_19, "nghttp2-change-table-size/story_19.json");
+        (story_20, "nghttp2-change-table-size/story_20.json");
+        (story_21, "nghttp2-change-table-size/story_21.json");
+        (story_22, "nghttp2-change-table-size/story_22.json");
+        (story_23, "nghttp2-change-table-size/story_23.json");
+        (story_24, "nghttp2-change-table-size/story_24.json");
+        (story_25, "nghttp2-change-table-size/story_25.json");
+        (story_26, "nghttp2-change-table-size/story_26.json");
+        (story_27, "nghttp2-change-table-size/story_27.json");
+        (story_28, "nghttp2-change-table-size/story_28.json");
+        (story_29, "nghttp2-change-table-size/story_29.json");
+        (story_30, "nghttp2-change-table-size/story_30.json");
+    }
+);
+
+fixture_mod!(
+    haskell_http2_static_huffman => {
+        (story_00, "haskell-http2-static-huffman/story_00.json");
+        (story_01, "haskell-http2-static-huffman/story_01.json");
+        (story_02, "haskell-http2-static-huffman/story_02.json");
+        (story_03, "haskell-http2-static-huffman/story_03.json");
+        (story_04, "haskell-http2-static-huffman/story_04.json");
+        (story_05, "haskell-http2-static-huffman/story_05.json");
+        (story_06, "haskell-http2-static-huffman/story_06.json");
+        (story_07, "haskell-http2-static-huffman/story_07.json");
+        (story_08, "haskell-http2-static-huffman/story_08.json");
+        (story_09, "haskell-http2-static-huffman/story_09.json");
+        (story_10, "haskell-http2-static-huffman/story_10.json");
+        (story_11, "haskell-http2-static-huffman/story_11.json");
+        (story_12, "haskell-http2-static-huffman/story_12.json");
+        (story_13, "haskell-http2-static-huffman/story_13.json");
+        (story_14, "haskell-http2-static-huffman/story_14.json");
+        (story_15, "haskell-http2-static-huffman/story_15.json");
+        (story_16, "haskell-http2-static-huffman/story_16.json");
+        (story_17, "haskell-http2-static-huffman/story_17.json");
+        (story_18, "haskell-http2-static-huffman/story_18.json");
+        (story_19, "haskell-http2-static-huffman/story_19.json");
+        (story_20, "haskell-http2-static-huffman/story_20.json");
+        (story_21, "haskell-http2-static-huffman/story_21.json");
+        (story_22, "haskell-http2-static-huffman/story_22.json");
+        (story_23, "haskell-http2-static-huffman/story_23.json");
+        (story_24, "haskell-http2-static-huffman/story_24.json");
+        (story_25, "haskell-http2-static-huffman/story_25.json");
+        (story_26, "haskell-http2-static-huffman/story_26.json");
+        (story_27, "haskell-http2-static-huffman/story_27.json");
+        (story_28, "haskell-http2-static-huffman/story_28.json");
+        (story_29, "haskell-http2-static-huffman/story_29.json");
+        (story_30, "haskell-http2-static-huffman/story_30.json");
+        (story_31, "haskell-http2-static-huffman/story_31.json");
+    }
+);
+
+fixture_mod!(
+    haskell_http2_naive_huffman => {
+        (story_00, "haskell-http2-naive-huffman/story_00.json");
+        (story_01, "haskell-http2-naive-huffman/story_01.json");
+        (story_02, "haskell-http2-naive-huffman/story_02.json");
+        (story_03, "haskell-http2-naive-huffman/story_03.json");
+        (story_04, "haskell-http2-naive-huffman/story_04.json");
+        (story_05, "haskell-http2-naive-huffman/story_05.json");
+        (story_06, "haskell-http2-naive-huffman/story_06.json");
+        (story_07, "haskell-http2-naive-huffman/story_07.json");
+        (story_08, "haskell-http2-naive-huffman/story_08.json");
+        (story_09, "haskell-http2-naive-huffman/story_09.json");
+        (story_10, "haskell-http2-naive-huffman/story_10.json");
+        (story_11, "haskell-http2-naive-huffman/story_11.json");
+        (story_12, "haskell-http2-naive-huffman/story_12.json");
+        (story_13, "haskell-http2-naive-huffman/story_13.json");
+        (story_14, "haskell-http2-naive-huffman/story_14.json");
+        (story_15, "haskell-http2-naive-huffman/story_15.json");
+        (story_16, "haskell-http2-naive-huffman/story_16.json");
+        (story_17, "haskell-http2-naive-huffman/story_17.json");
+        (story_18, "haskell-http2-naive-huffman/story_18.json");
+        (story_19, "haskell-http2-naive-huffman/story_19.json");
+        (story_20, "haskell-http2-naive-huffman/story_20.json");
+        (story_21, "haskell-http2-naive-huffman/story_21.json");
+        (story_22, "haskell-http2-naive-huffman/story_22.json");
+        (story_23, "haskell-http2-naive-huffman/story_23.json");
+        (story_24, "haskell-http2-naive-huffman/story_24.json");
+        (story_25, "haskell-http2-naive-huffman/story_25.json");
+        (story_26, "haskell-http2-naive-huffman/story_26.json");
+        (story_27, "haskell-http2-naive-huffman/story_27.json");
+        (story_28, "haskell-http2-naive-huffman/story_28.json");
+        (story_29, "haskell-http2-naive-huffman/story_29.json");
+        (story_30, "haskell-http2-naive-huffman/story_30.json");
+        (story_31, "haskell-http2-naive-huffman/story_31.json");
+    }
+);
+
+fixture_mod!(
+    haskell_http2_naive => {
+        (story_00, "haskell-http2-naive/story_00.json");
+        (story_01, "haskell-http2-naive/story_01.json");
+        (story_02, "haskell-http2-naive/story_02.json");
+        (story_03, "haskell-http2-naive/story_03.json");
+        (story_04, "haskell-http2-naive/story_04.json");
+        (story_05, "haskell-http2-naive/story_05.json");
+        (story_06, "haskell-http2-naive/story_06.json");
+        (story_07, "haskell-http2-naive/story_07.json");
+        (story_08, "haskell-http2-naive/story_08.json");
+        (story_09, "haskell-http2-naive/story_09.json");
+        (story_10, "haskell-http2-naive/story_10.json");
+        (story_11, "haskell-http2-naive/story_11.json");
+        (story_12, "haskell-http2-naive/story_12.json");
+        (story_13, "haskell-http2-naive/story_13.json");
+        (story_14, "haskell-http2-naive/story_14.json");
+        (story_15, "haskell-http2-naive/story_15.json");
+        (story_16, "haskell-http2-naive/story_16.json");
+        (story_17, "haskell-http2-naive/story_17.json");
+        (story_18, "haskell-http2-naive/story_18.json");
+        (story_19, "haskell-http2-naive/story_19.json");
+        (story_20, "haskell-http2-naive/story_20.json");
+        (story_21, "haskell-http2-naive/story_21.json");
+        (story_22, "haskell-http2-naive/story_22.json");
+        (story_23, "haskell-http2-naive/story_23.json");
+        (story_24, "haskell-http2-naive/story_24.json");
+        (story_25, "haskell-http2-naive/story_25.json");
+        (story_26, "haskell-http2-naive/story_26.json");
+        (story_27, "haskell-http2-naive/story_27.json");
+        (story_28, "haskell-http2-naive/story_28.json");
+        (story_29, "haskell-http2-naive/story_29.json");
+        (story_30, "haskell-http2-naive/story_30.json");
+        (story_31, "haskell-http2-naive/story_31.json");
+    }
+);
+
+fixture_mod!(
+    haskell_http2_static => {
+        (story_00, "haskell-http2-static/story_00.json");
+        (story_01, "haskell-http2-static/story_01.json");
+        (story_02, "haskell-http2-static/story_02.json");
+        (story_03, "haskell-http2-static/story_03.json");
+        (story_04, "haskell-http2-static/story_04.json");
+        (story_05, "haskell-http2-static/story_05.json");
+        (story_06, "haskell-http2-static/story_06.json");
+        (story_07, "haskell-http2-static/story_07.json");
+        (story_08, "haskell-http2-static/story_08.json");
+        (story_09, "haskell-http2-static/story_09.json");
+        (story_10, "haskell-http2-static/story_10.json");
+        (story_11, "haskell-http2-static/story_11.json");
+        (story_12, "haskell-http2-static/story_12.json");
+        (story_13, "haskell-http2-static/story_13.json");
+        (story_14, "haskell-http2-static/story_14.json");
+        (story_15, "haskell-http2-static/story_15.json");
+        (story_16, "haskell-http2-static/story_16.json");
+        (story_17, "haskell-http2-static/story_17.json");
+        (story_18, "haskell-http2-static/story_18.json");
+        (story_19, "haskell-http2-static/story_19.json");
+        (story_20, "haskell-http2-static/story_20.json");
+        (story_21, "haskell-http2-static/story_21.json");
+        (story_22, "haskell-http2-static/story_22.json");
+        (story_23, "haskell-http2-static/story_23.json");
+        (story_24, "haskell-http2-static/story_24.json");
+        (story_25, "haskell-http2-static/story_25.json");
+        (story_26, "haskell-http2-static/story_26.json");
+        (story_27, "haskell-http2-static/story_27.json");
+        (story_28, "haskell-http2-static/story_28.json");
+        (story_29, "haskell-http2-static/story_29.json");
+        (story_30, "haskell-http2-static/story_30.json");
+        (story_31, "haskell-http2-static/story_31.json");
+    }
+);
+
+fixture_mod!(
+    nghttp2 => {
+        (story_00, "nghttp2/story_00.json");
+        (story_01, "nghttp2/story_01.json");
+        (story_02, "nghttp2/story_02.json");
+        (story_03, "nghttp2/story_03.json");
+        (story_04, "nghttp2/story_04.json");
+        (story_05, "nghttp2/story_05.json");
+        (story_06, "nghttp2/story_06.json");
+        (story_07, "nghttp2/story_07.json");
+        (story_08, "nghttp2/story_08.json");
+        (story_09, "nghttp2/story_09.json");
+        (story_10, "nghttp2/story_10.json");
+        (story_11, "nghttp2/story_11.json");
+        (story_12, "nghttp2/story_12.json");
+        (story_13, "nghttp2/story_13.json");
+        (story_14, "nghttp2/story_14.json");
+        (story_15, "nghttp2/story_15.json");
+        (story_16, "nghttp2/story_16.json");
+        (story_17, "nghttp2/story_17.json");
+        (story_18, "nghttp2/story_18.json");
+        (story_19, "nghttp2/story_19.json");
+        (story_20, "nghttp2/story_20.json");
+        (story_21, "nghttp2/story_21.json");
+        (story_22, "nghttp2/story_22.json");
+        (story_23, "nghttp2/story_23.json");
+        (story_24, "nghttp2/story_24.json");
+        (story_25, "nghttp2/story_25.json");
+        (story_26, "nghttp2/story_26.json");
+        (story_27, "nghttp2/story_27.json");
+        (story_28, "nghttp2/story_28.json");
+        (story_29, "nghttp2/story_29.json");
+        (story_30, "nghttp2/story_30.json");
+        (story_31, "nghttp2/story_31.json");
+    }
+);
+
+fixture_mod!(
+    haskell_http2_linear => {
+        (story_00, "haskell-http2-linear/story_00.json");
+        (story_01, "haskell-http2-linear/story_01.json");
+        (story_02, "haskell-http2-linear/story_02.json");
+        (story_03, "haskell-http2-linear/story_03.json");
+        (story_04, "haskell-http2-linear/story_04.json");
+        (story_05, "haskell-http2-linear/story_05.json");
+        (story_06, "haskell-http2-linear/story_06.json");
+        (story_07, "haskell-http2-linear/story_07.json");
+        (story_08, "haskell-http2-linear/story_08.json");
+        (story_09, "haskell-http2-linear/story_09.json");
+        (story_10, "haskell-http2-linear/story_10.json");
+        (story_11, "haskell-http2-linear/story_11.json");
+        (story_12, "haskell-http2-linear/story_12.json");
+        (story_13, "haskell-http2-linear/story_13.json");
+        (story_14, "haskell-http2-linear/story_14.json");
+        (story_15, "haskell-http2-linear/story_15.json");
+        (story_16, "haskell-http2-linear/story_16.json");
+        (story_17, "haskell-http2-linear/story_17.json");
+        (story_18, "haskell-http2-linear/story_18.json");
+        (story_19, "haskell-http2-linear/story_19.json");
+        (story_20, "haskell-http2-linear/story_20.json");
+        (story_21, "haskell-http2-linear/story_21.json");
+        (story_22, "haskell-http2-linear/story_22.json");
+        (story_23, "haskell-http2-linear/story_23.json");
+        (story_24, "haskell-http2-linear/story_24.json");
+        (story_25, "haskell-http2-linear/story_25.json");
+        (story_26, "haskell-http2-linear/story_26.json");
+        (story_27, "haskell-http2-linear/story_27.json");
+        (story_28, "haskell-http2-linear/story_28.json");
+        (story_29, "haskell-http2-linear/story_29.json");
+        (story_30, "haskell-http2-linear/story_30.json");
+        (story_31, "haskell-http2-linear/story_31.json");
+    }
+);
+
+fixture_mod!(
+    go_hpack => {
+        (story_00, "go-hpack/story_00.json");
+        (story_01, "go-hpack/story_01.json");
+        (story_02, "go-hpack/story_02.json");
+        (story_03, "go-hpack/story_03.json");
+        (story_04, "go-hpack/story_04.json");
+        (story_05, "go-hpack/story_05.json");
+        (story_06, "go-hpack/story_06.json");
+        (story_07, "go-hpack/story_07.json");
+        (story_08, "go-hpack/story_08.json");
+        (story_09, "go-hpack/story_09.json");
+        (story_10, "go-hpack/story_10.json");
+        (story_11, "go-hpack/story_11.json");
+        (story_12, "go-hpack/story_12.json");
+        (story_13, "go-hpack/story_13.json");
+        (story_14, "go-hpack/story_14.json");
+        (story_15, "go-hpack/story_15.json");
+        (story_16, "go-hpack/story_16.json");
+        (story_17, "go-hpack/story_17.json");
+        (story_18, "go-hpack/story_18.json");
+        (story_19, "go-hpack/story_19.json");
+        (story_20, "go-hpack/story_20.json");
+        (story_21, "go-hpack/story_21.json");
+        (story_22, "go-hpack/story_22.json");
+        (story_23, "go-hpack/story_23.json");
+        (story_24, "go-hpack/story_24.json");
+        (story_25, "go-hpack/story_25.json");
+        (story_26, "go-hpack/story_26.json");
+        (story_27, "go-hpack/story_27.json");
+        (story_28, "go-hpack/story_28.json");
+        (story_29, "go-hpack/story_29.json");
+        (story_30, "go-hpack/story_30.json");
+        (story_31, "go-hpack/story_31.json");
+    }
+);
diff --git a/src/hpack/test/fuzz.rs b/src/hpack/test/fuzz.rs
new file mode 100644
index 0000000..af9e8ea
--- /dev/null
+++ b/src/hpack/test/fuzz.rs
@@ -0,0 +1,365 @@
+use crate::hpack::{Decoder, Encoder, Header};
+
+use http::header::{HeaderName, HeaderValue};
+
+use bytes::BytesMut;
+use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult};
+use rand::distributions::Slice;
+use rand::rngs::StdRng;
+use rand::{thread_rng, Rng, SeedableRng};
+
+use std::io::Cursor;
+
+const MAX_CHUNK: usize = 2 * 1024;
+
+#[test]
+fn hpack_fuzz() {
+    let _ = env_logger::try_init();
+    fn prop(fuzz: FuzzHpack) -> TestResult {
+        fuzz.run();
+        TestResult::from_bool(true)
+    }
+
+    QuickCheck::new()
+        .tests(100)
+        .quickcheck(prop as fn(FuzzHpack) -> TestResult)
+}
+
+/*
+// To test with a specific seed, uncomment this test and fill in the seed.
+#[test]
+fn hpack_fuzz_seeded() {
+    let _ = env_logger::try_init();
+    let seed = [/* fill me in*/];
+    FuzzHpack::new(seed).run();
+}
+*/
+
+#[derive(Debug, Clone)]
+struct FuzzHpack {
+    // The set of headers to encode / decode
+    frames: Vec<HeaderFrame>,
+}
+
+#[derive(Debug, Clone)]
+struct HeaderFrame {
+    resizes: Vec<usize>,
+    headers: Vec<Header<Option<HeaderName>>>,
+}
+
+impl FuzzHpack {
+    fn new(seed: [u8; 32]) -> FuzzHpack {
+        // Seed the RNG
+        let mut rng = StdRng::from_seed(seed);
+
+        // Generates a bunch of source headers
+        let mut source: Vec<Header<Option<HeaderName>>> = vec![];
+
+        for _ in 0..2000 {
+            source.push(gen_header(&mut rng));
+        }
+
+        // Actual test run headers
+        let num: usize = rng.gen_range(40..500);
+
+        let mut frames: Vec<HeaderFrame> = vec![];
+        let mut added = 0;
+
+        let skew: i32 = rng.gen_range(1..5);
+
+        // Rough number of headers to add
+        while added < num {
+            let mut frame = HeaderFrame {
+                resizes: vec![],
+                headers: vec![],
+            };
+
+            match rng.gen_range(0..20) {
+                0 => {
+                    // Two resizes
+                    let high = rng.gen_range(128..MAX_CHUNK * 2);
+                    let low = rng.gen_range(0..high);
+
+                    frame.resizes.extend([low, high]);
+                }
+                1..=3 => {
+                    frame.resizes.push(rng.gen_range(128..MAX_CHUNK * 2));
+                }
+                _ => {}
+            }
+
+            let mut is_name_required = true;
+
+            for _ in 0..rng.gen_range(1..(num - added) + 1) {
+                let x: f64 = rng.gen_range(0.0..1.0);
+                let x = x.powi(skew);
+
+                let i = (x * source.len() as f64) as usize;
+
+                let header = &source[i];
+                match header {
+                    Header::Field { name: None, .. } => {
+                        if is_name_required {
+                            continue;
+                        }
+                    }
+                    Header::Field { .. } => {
+                        is_name_required = false;
+                    }
+                    _ => {
+                        // pseudos can't be followed by a header with no name
+                        is_name_required = true;
+                    }
+                }
+
+                frame.headers.push(header.clone());
+
+                added += 1;
+            }
+
+            frames.push(frame);
+        }
+
+        FuzzHpack { frames }
+    }
+
+    fn run(self) {
+        let frames = self.frames;
+        let mut expect = vec![];
+
+        let mut encoder = Encoder::default();
+        let mut decoder = Decoder::default();
+
+        for frame in frames {
+            // build "expected" frames, such that decoding headers always
+            // includes a name
+            let mut prev_name = None;
+            for header in &frame.headers {
+                match header.clone().reify() {
+                    Ok(h) => {
+                        prev_name = match h {
+                            Header::Field { ref name, .. } => Some(name.clone()),
+                            _ => None,
+                        };
+                        expect.push(h);
+                    }
+                    Err(value) => {
+                        expect.push(Header::Field {
+                            name: prev_name.as_ref().cloned().expect("previous header name"),
+                            value,
+                        });
+                    }
+                }
+            }
+
+            let mut buf = BytesMut::new();
+
+            if let Some(max) = frame.resizes.iter().max() {
+                decoder.queue_size_update(*max);
+            }
+
+            // Apply resizes
+            for resize in &frame.resizes {
+                encoder.update_max_size(*resize);
+            }
+
+            encoder.encode(frame.headers, &mut buf);
+
+            // Decode the chunk!
+            decoder
+                .decode(&mut Cursor::new(&mut buf), |h| {
+                    let e = expect.remove(0);
+                    assert_eq!(h, e);
+                })
+                .expect("full decode");
+        }
+
+        assert_eq!(0, expect.len());
+    }
+}
+
+impl Arbitrary for FuzzHpack {
+    fn arbitrary(_: &mut Gen) -> Self {
+        FuzzHpack::new(thread_rng().gen())
+    }
+}
+
+fn gen_header(g: &mut StdRng) -> Header<Option<HeaderName>> {
+    use http::{Method, StatusCode};
+
+    if g.gen_ratio(1, 10) {
+        match g.gen_range(0u32..5) {
+            0 => {
+                let value = gen_string(g, 4, 20);
+                Header::Authority(to_shared(value))
+            }
+            1 => {
+                let method = match g.gen_range(0u32..6) {
+                    0 => Method::GET,
+                    1 => Method::POST,
+                    2 => Method::PUT,
+                    3 => Method::PATCH,
+                    4 => Method::DELETE,
+                    5 => {
+                        let n: usize = g.gen_range(3..7);
+                        let bytes: Vec<u8> = (0..n)
+                            .map(|_| *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap()))
+                            .collect();
+
+                        Method::from_bytes(&bytes).unwrap()
+                    }
+                    _ => unreachable!(),
+                };
+
+                Header::Method(method)
+            }
+            2 => {
+                let value = match g.gen_range(0u32..2) {
+                    0 => "http",
+                    1 => "https",
+                    _ => unreachable!(),
+                };
+
+                Header::Scheme(to_shared(value.to_string()))
+            }
+            3 => {
+                let value = match g.gen_range(0u32..100) {
+                    0 => "/".to_string(),
+                    1 => "/index.html".to_string(),
+                    _ => gen_string(g, 2, 20),
+                };
+
+                Header::Path(to_shared(value))
+            }
+            4 => {
+                let status = (g.gen::<u16>() % 500) + 100;
+
+                Header::Status(StatusCode::from_u16(status).unwrap())
+            }
+            _ => unreachable!(),
+        }
+    } else {
+        let name = if g.gen_ratio(1, 10) {
+            None
+        } else {
+            Some(gen_header_name(g))
+        };
+        let mut value = gen_header_value(g);
+
+        if g.gen_ratio(1, 30) {
+            value.set_sensitive(true);
+        }
+
+        Header::Field { name, value }
+    }
+}
+
+fn gen_header_name(g: &mut StdRng) -> HeaderName {
+    use http::header;
+
+    if g.gen_ratio(1, 2) {
+        g.sample(
+            Slice::new(&[
+                header::ACCEPT,
+                header::ACCEPT_CHARSET,
+                header::ACCEPT_ENCODING,
+                header::ACCEPT_LANGUAGE,
+                header::ACCEPT_RANGES,
+                header::ACCESS_CONTROL_ALLOW_CREDENTIALS,
+                header::ACCESS_CONTROL_ALLOW_HEADERS,
+                header::ACCESS_CONTROL_ALLOW_METHODS,
+                header::ACCESS_CONTROL_ALLOW_ORIGIN,
+                header::ACCESS_CONTROL_EXPOSE_HEADERS,
+                header::ACCESS_CONTROL_MAX_AGE,
+                header::ACCESS_CONTROL_REQUEST_HEADERS,
+                header::ACCESS_CONTROL_REQUEST_METHOD,
+                header::AGE,
+                header::ALLOW,
+                header::ALT_SVC,
+                header::AUTHORIZATION,
+                header::CACHE_CONTROL,
+                header::CONNECTION,
+                header::CONTENT_DISPOSITION,
+                header::CONTENT_ENCODING,
+                header::CONTENT_LANGUAGE,
+                header::CONTENT_LENGTH,
+                header::CONTENT_LOCATION,
+                header::CONTENT_RANGE,
+                header::CONTENT_SECURITY_POLICY,
+                header::CONTENT_SECURITY_POLICY_REPORT_ONLY,
+                header::CONTENT_TYPE,
+                header::COOKIE,
+                header::DNT,
+                header::DATE,
+                header::ETAG,
+                header::EXPECT,
+                header::EXPIRES,
+                header::FORWARDED,
+                header::FROM,
+                header::HOST,
+                header::IF_MATCH,
+                header::IF_MODIFIED_SINCE,
+                header::IF_NONE_MATCH,
+                header::IF_RANGE,
+                header::IF_UNMODIFIED_SINCE,
+                header::LAST_MODIFIED,
+                header::LINK,
+                header::LOCATION,
+                header::MAX_FORWARDS,
+                header::ORIGIN,
+                header::PRAGMA,
+                header::PROXY_AUTHENTICATE,
+                header::PROXY_AUTHORIZATION,
+                header::PUBLIC_KEY_PINS,
+                header::PUBLIC_KEY_PINS_REPORT_ONLY,
+                header::RANGE,
+                header::REFERER,
+                header::REFERRER_POLICY,
+                header::REFRESH,
+                header::RETRY_AFTER,
+                header::SERVER,
+                header::SET_COOKIE,
+                header::STRICT_TRANSPORT_SECURITY,
+                header::TE,
+                header::TRAILER,
+                header::TRANSFER_ENCODING,
+                header::USER_AGENT,
+                header::UPGRADE,
+                header::UPGRADE_INSECURE_REQUESTS,
+                header::VARY,
+                header::VIA,
+                header::WARNING,
+                header::WWW_AUTHENTICATE,
+                header::X_CONTENT_TYPE_OPTIONS,
+                header::X_DNS_PREFETCH_CONTROL,
+                header::X_FRAME_OPTIONS,
+                header::X_XSS_PROTECTION,
+            ])
+            .unwrap(),
+        )
+        .clone()
+    } else {
+        let value = gen_string(g, 1, 25);
+        HeaderName::from_bytes(value.as_bytes()).unwrap()
+    }
+}
+
+fn gen_header_value(g: &mut StdRng) -> HeaderValue {
+    let value = gen_string(g, 0, 70);
+    HeaderValue::from_bytes(value.as_bytes()).unwrap()
+}
+
+fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String {
+    let bytes: Vec<_> = (min..max)
+        .map(|_| {
+            // Chars to pick from
+            *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----").unwrap())
+        })
+        .collect();
+
+    String::from_utf8(bytes).unwrap()
+}
+
+fn to_shared(src: String) -> crate::hpack::BytesStr {
+    crate::hpack::BytesStr::from(src.as_str())
+}
diff --git a/src/hpack/test/mod.rs b/src/hpack/test/mod.rs
new file mode 100644
index 0000000..9b1f271
--- /dev/null
+++ b/src/hpack/test/mod.rs
@@ -0,0 +1,2 @@
+mod fixture;
+mod fuzz;
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..fd7782f
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,140 @@
+//! An asynchronous, HTTP/2 server and client implementation.
+//!
+//! This library implements the [HTTP/2] specification. The implementation is
+//! asynchronous, using [futures] as the basis for the API. The implementation
+//! is also decoupled from TCP or TLS details. The user must handle ALPN and
+//! HTTP/1.1 upgrades themselves.
+//!
+//! # Getting started
+//!
+//! Add the following to your `Cargo.toml` file:
+//!
+//! ```toml
+//! [dependencies]
+//! h2 = "0.4"
+//! ```
+//!
+//! # Layout
+//!
+//! The crate is split into [`client`] and [`server`] modules. Types that are
+//! common to both clients and servers are located at the root of the crate.
+//!
+//! See module level documentation for more details on how to use `h2`.
+//!
+//! # Handshake
+//!
+//! Both the client and the server require a connection to already be in a state
+//! ready to start the HTTP/2 handshake. This library does not provide
+//! facilities to do this.
+//!
+//! There are three ways to reach an appropriate state to start the HTTP/2
+//! handshake.
+//!
+//! * Opening an HTTP/1.1 connection and performing an [upgrade].
+//! * Opening a connection with TLS and use ALPN to negotiate the protocol.
+//! * Open a connection with prior knowledge, i.e. both the client and the
+//!   server assume that the connection is immediately ready to start the
+//!   HTTP/2 handshake once opened.
+//!
+//! Once the connection is ready to start the HTTP/2 handshake, it can be
+//! passed to [`server::handshake`] or [`client::handshake`]. At this point, the
+//! library will start the handshake process, which consists of:
+//!
+//! * The client sending the connection preface (a predefined sequence of 24
+//!   octets).
+//! * Both the client and the server sending a SETTINGS frame.
+//!
+//! See the [Starting HTTP/2] section of the specification for more details.
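+//!
+//! As a rough sketch (not from the upstream documentation; it assumes a
+//! `tokio` runtime and a connection established with prior knowledge), the
+//! client side might look like this:
+//!
+//! ```ignore
+//! // Assumes `tcp` is a `tokio::net::TcpStream` whose peer speaks HTTP/2
+//! // with prior knowledge (no ALPN or upgrade needed).
+//! let (send_request, connection) = h2::client::handshake(tcp).await?;
+//!
+//! // The returned `Connection` future drives the protocol and must be polled.
+//! tokio::spawn(async move {
+//!     if let Err(e) = connection.await {
+//!         eprintln!("connection error: {e}");
+//!     }
+//! });
+//!
+//! // Wait until the connection is ready to open a new stream, then send a
+//! // request on it.
+//! let mut send_request = send_request.ready().await?;
+//! let request = http::Request::get("https://example.com/").body(())?;
+//! let (response, _send_stream) = send_request.send_request(request, true)?;
+//! let response = response.await?;
+//! ```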
+//!
+//! # Flow control
+//!
+//! [Flow control] is a fundamental feature of HTTP/2. The `h2` library
+//! exposes flow control to the user.
+//!
+//! An HTTP/2 client or server may not send unlimited data to the peer. When a
+//! stream is initiated, both the client and the server are provided with an
+//! initial window size for that stream.  A window size is the number of bytes
+//! the endpoint can send to the peer. At any point in time, the peer may
+//! increase this window size by sending a `WINDOW_UPDATE` frame. Once a client
+//! or server has sent data filling the window for a stream, no further data may
+//! be sent on that stream until the peer increases the window.
+//!
+//! There is also a **connection level** window governing data sent across all
+//! streams.
+//!
+//! Managing flow control for inbound data is done through [`FlowControl`].
+//! Managing flow control for outbound data is done through [`SendStream`]. See
+//! the struct level documentation for those two types for more details.
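+//!
+//! For illustration only (a sketch, not upstream documentation), releasing
+//! capacity for received data might look roughly like this:
+//!
+//! ```ignore
+//! // `body` is the `RecvStream` of a received request or response.
+//! while let Some(chunk) = body.data().await {
+//!     let chunk = chunk?;
+//!     // ... process the bytes ...
+//!     // Release the consumed bytes so the peer's send window can reopen.
+//!     let _ = body.flow_control().release_capacity(chunk.len());
+//! }
+//! ```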
+//!
+//! [HTTP/2]: https://http2.github.io/
+//! [futures]: https://docs.rs/futures/
+//! [`client`]: client/index.html
+//! [`server`]: server/index.html
+//! [Flow control]: http://httpwg.org/specs/rfc7540.html#FlowControl
+//! [`FlowControl`]: struct.FlowControl.html
+//! [`SendStream`]: struct.SendStream.html
+//! [Starting HTTP/2]: http://httpwg.org/specs/rfc7540.html#starting
+//! [upgrade]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism
+//! [`server::handshake`]: server/fn.handshake.html
+//! [`client::handshake`]: client/fn.handshake.html
+
+#![deny(
+    missing_debug_implementations,
+    missing_docs,
+    clippy::missing_safety_doc,
+    clippy::undocumented_unsafe_blocks
+)]
+#![allow(clippy::type_complexity, clippy::manual_range_contains)]
+#![cfg_attr(test, deny(warnings))]
+
+macro_rules! proto_err {
+    (conn: $($msg:tt)+) => {
+        tracing::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
+    };
+    (stream: $($msg:tt)+) => {
+        tracing::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
+    };
+}
+
+macro_rules! ready {
+    ($e:expr) => {
+        match $e {
+            ::std::task::Poll::Ready(r) => r,
+            ::std::task::Poll::Pending => return ::std::task::Poll::Pending,
+        }
+    };
+}
+
+#[cfg_attr(feature = "unstable", allow(missing_docs))]
+mod codec;
+mod error;
+mod hpack;
+
+#[cfg(not(feature = "unstable"))]
+mod proto;
+
+#[cfg(feature = "unstable")]
+#[allow(missing_docs)]
+pub mod proto;
+
+#[cfg(not(feature = "unstable"))]
+mod frame;
+
+#[cfg(feature = "unstable")]
+#[allow(missing_docs)]
+pub mod frame;
+
+pub mod client;
+pub mod ext;
+pub mod server;
+mod share;
+
+#[cfg(fuzzing)]
+#[cfg_attr(feature = "unstable", allow(missing_docs))]
+pub mod fuzz_bridge;
+
+pub use crate::error::{Error, Reason};
+pub use crate::share::{FlowControl, Ping, PingPong, Pong, RecvStream, SendStream, StreamId};
+
+#[cfg(feature = "unstable")]
+pub use codec::{Codec, SendError, UserError};
diff --git a/src/proto/connection.rs b/src/proto/connection.rs
new file mode 100644
index 0000000..5969bb8
--- /dev/null
+++ b/src/proto/connection.rs
@@ -0,0 +1,610 @@
+use crate::codec::UserError;
+use crate::frame::{Reason, StreamId};
+use crate::{client, server};
+
+use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE;
+use crate::proto::*;
+
+use bytes::Bytes;
+use futures_core::Stream;
+use std::io;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::time::Duration;
+use tokio::io::AsyncRead;
+
+/// An H2 connection
+#[derive(Debug)]
+pub(crate) struct Connection<T, P, B: Buf = Bytes>
+where
+    P: Peer,
+{
+    /// Read / write frame values
+    codec: Codec<T, Prioritized<B>>,
+
+    inner: ConnectionInner<P, B>,
+}
+
+// Extracted part of `Connection` which does not depend on `T`. Reduces the
+// number of duplicated method instantiations.
+#[derive(Debug)]
+struct ConnectionInner<P, B: Buf = Bytes>
+where
+    P: Peer,
+{
+    /// Tracks the connection level state transitions.
+    state: State,
+
+    /// An error to report back once complete.
+    ///
+    /// This exists separately from State in order to support
+    /// graceful shutdown.
+    error: Option<frame::GoAway>,
+
+    /// Pending GOAWAY frames to write.
+    go_away: GoAway,
+
+    /// Ping/pong handler
+    ping_pong: PingPong,
+
+    /// Connection settings
+    settings: Settings,
+
+    /// Stream state handler
+    streams: Streams<B, P>,
+
+    /// A `tracing` span tracking the lifetime of the connection.
+    span: tracing::Span,
+
+    /// Client or server
+    _phantom: PhantomData<P>,
+}
+
+struct DynConnection<'a, B: Buf = Bytes> {
+    state: &'a mut State,
+
+    go_away: &'a mut GoAway,
+
+    streams: DynStreams<'a, B>,
+
+    error: &'a mut Option<frame::GoAway>,
+
+    ping_pong: &'a mut PingPong,
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct Config {
+    pub next_stream_id: StreamId,
+    pub initial_max_send_streams: usize,
+    pub max_send_buffer_size: usize,
+    pub reset_stream_duration: Duration,
+    pub reset_stream_max: usize,
+    pub remote_reset_stream_max: usize,
+    pub local_error_reset_streams_max: Option<usize>,
+    pub settings: frame::Settings,
+}
+
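+// The connection only moves forward through these states: `Open`, then
+// `Closing` (flush the codec), then `Closed` (report the final reason).
+// The transitions are driven by `Connection::poll`.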
+#[derive(Debug)]
+enum State {
+    /// Currently open in a sane state
+    Open,
+
+    /// The codec must be flushed
+    Closing(Reason, Initiator),
+
+    /// In a closed state
+    Closed(Reason, Initiator),
+}
+
+impl<T, P, B> Connection<T, P, B>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+    P: Peer,
+    B: Buf,
+{
+    pub fn new(codec: Codec<T, Prioritized<B>>, config: Config) -> Connection<T, P, B> {
+        fn streams_config(config: &Config) -> streams::Config {
+            streams::Config {
+                initial_max_send_streams: config.initial_max_send_streams,
+                local_max_buffer_size: config.max_send_buffer_size,
+                local_next_stream_id: config.next_stream_id,
+                local_push_enabled: config.settings.is_push_enabled().unwrap_or(true),
+                extended_connect_protocol_enabled: config
+                    .settings
+                    .is_extended_connect_protocol_enabled()
+                    .unwrap_or(false),
+                local_reset_duration: config.reset_stream_duration,
+                local_reset_max: config.reset_stream_max,
+                remote_reset_max: config.remote_reset_stream_max,
+                remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE,
+                remote_max_initiated: config
+                    .settings
+                    .max_concurrent_streams()
+                    .map(|max| max as usize),
+                local_max_error_reset_streams: config.local_error_reset_streams_max,
+            }
+        }
+        let streams = Streams::new(streams_config(&config));
+        Connection {
+            codec,
+            inner: ConnectionInner {
+                state: State::Open,
+                error: None,
+                go_away: GoAway::new(),
+                ping_pong: PingPong::new(),
+                settings: Settings::new(config.settings),
+                streams,
+                span: tracing::debug_span!("Connection", peer = %P::NAME),
+                _phantom: PhantomData,
+            },
+        }
+    }
+
+    /// connection flow control
+    pub(crate) fn set_target_window_size(&mut self, size: WindowSize) {
+        let _res = self.inner.streams.set_target_connection_window_size(size);
+        // TODO: proper error handling
+        debug_assert!(_res.is_ok());
+    }
+
+    /// Send a new SETTINGS frame with an updated initial window size.
+    pub(crate) fn set_initial_window_size(&mut self, size: WindowSize) -> Result<(), UserError> {
+        let mut settings = frame::Settings::default();
+        settings.set_initial_window_size(Some(size));
+        self.inner.settings.send_settings(settings)
+    }
+
+    /// Send a new SETTINGS frame with extended CONNECT protocol enabled.
+    pub(crate) fn set_enable_connect_protocol(&mut self) -> Result<(), UserError> {
+        let mut settings = frame::Settings::default();
+        settings.set_enable_connect_protocol(Some(1));
+        self.inner.settings.send_settings(settings)
+    }
+
+    /// Returns the maximum number of concurrent streams that may be initiated
+    /// by this peer.
+    pub(crate) fn max_send_streams(&self) -> usize {
+        self.inner.streams.max_send_streams()
+    }
+
+    /// Returns the maximum number of concurrent streams that may be initiated
+    /// by the remote peer.
+    pub(crate) fn max_recv_streams(&self) -> usize {
+        self.inner.streams.max_recv_streams()
+    }
+
+    #[cfg(feature = "unstable")]
+    pub fn num_wired_streams(&self) -> usize {
+        self.inner.streams.num_wired_streams()
+    }
+
+    /// Returns `Ready` when the connection is ready to receive a frame.
+    ///
+    /// This may return an `Error`, since it can surface errors caused by
+    /// delayed processing of previously received frames.
+    fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
+        let _e = self.inner.span.enter();
+        let span = tracing::trace_span!("poll_ready");
+        let _e = span.enter();
+        // The order of these calls doesn't matter much.
+        ready!(self.inner.ping_pong.send_pending_pong(cx, &mut self.codec))?;
+        ready!(self.inner.ping_pong.send_pending_ping(cx, &mut self.codec))?;
+        ready!(self
+            .inner
+            .settings
+            .poll_send(cx, &mut self.codec, &mut self.inner.streams))?;
+        ready!(self.inner.streams.send_pending_refusal(cx, &mut self.codec))?;
+
+        Poll::Ready(Ok(()))
+    }
+
+    /// Send any pending GOAWAY frames.
+    ///
+    /// This will return `Some(reason)` if the connection should be closed
+    /// afterwards. If this is a graceful shutdown, this returns `None`.
+    fn poll_go_away(&mut self, cx: &mut Context) -> Poll<Option<io::Result<Reason>>> {
+        self.inner.go_away.send_pending_go_away(cx, &mut self.codec)
+    }
+
+    pub fn go_away_from_user(&mut self, e: Reason) {
+        self.inner.as_dyn().go_away_from_user(e)
+    }
+
+    fn take_error(&mut self, ours: Reason, initiator: Initiator) -> Result<(), Error> {
+        let (debug_data, theirs) = self
+            .inner
+            .error
+            .take()
+            .as_ref()
+            .map_or((Bytes::new(), Reason::NO_ERROR), |frame| {
+                (frame.debug_data().clone(), frame.reason())
+            });
+
+        match (ours, theirs) {
+            (Reason::NO_ERROR, Reason::NO_ERROR) => Ok(()),
+            (ours, Reason::NO_ERROR) => Err(Error::GoAway(Bytes::new(), ours, initiator)),
+            // If both sides reported an error, give their
+            // error back to the user. We assume our error
+            // was a consequence of their error, and less
+            // important.
+            (_, theirs) => Err(Error::remote_go_away(debug_data, theirs)),
+        }
+    }
+
+    /// Closes the connection by transitioning to a GOAWAY state
+    /// iff there are no streams or references
+    pub fn maybe_close_connection_if_no_streams(&mut self) {
+        // If we poll() and realize that there are no streams or references
+        // then we can close the connection by transitioning to GOAWAY
+        if !self.inner.streams.has_streams_or_other_references() {
+            self.inner.as_dyn().go_away_now(Reason::NO_ERROR);
+        }
+    }
+
+    pub(crate) fn take_user_pings(&mut self) -> Option<UserPings> {
+        self.inner.ping_pong.take_user_pings()
+    }
+
+    /// Advances the internal state of the connection.
+    pub fn poll(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
+        // XXX(eliza): cloning the span is unfortunately necessary here in
+        // order to placate the borrow checker — `self` is mutably borrowed by
+        // `poll2`, which means that we can't borrow `self.span` to enter it.
+        // The clone is just an atomic ref bump.
+        let span = self.inner.span.clone();
+        let _e = span.enter();
+        let span = tracing::trace_span!("poll");
+        let _e = span.enter();
+
+        loop {
+            tracing::trace!(connection.state = ?self.inner.state);
+            // TODO: probably clean up this glob of code
+            match self.inner.state {
+                // When open, continue to poll a frame
+                State::Open => {
+                    let result = match self.poll2(cx) {
+                        Poll::Ready(result) => result,
+                        // The connection is not ready to make progress
+                        Poll::Pending => {
+                            // Ensure all window updates have been sent.
+                            //
+                            // This will also handle flushing `self.codec`
+                            ready!(self.inner.streams.poll_complete(cx, &mut self.codec))?;
+
+                            if (self.inner.error.is_some()
+                                || self.inner.go_away.should_close_on_idle())
+                                && !self.inner.streams.has_streams()
+                            {
+                                self.inner.as_dyn().go_away_now(Reason::NO_ERROR);
+                                continue;
+                            }
+
+                            return Poll::Pending;
+                        }
+                    };
+
+                    self.inner.as_dyn().handle_poll2_result(result)?
+                }
+                State::Closing(reason, initiator) => {
+                    tracing::trace!("connection closing after flush");
+                    // Flush/shutdown the codec
+                    ready!(self.codec.shutdown(cx))?;
+
+                    // Transition the state to error
+                    self.inner.state = State::Closed(reason, initiator);
+                }
+                State::Closed(reason, initiator) => {
+                    return Poll::Ready(self.take_error(reason, initiator));
+                }
+            }
+        }
+    }
+
+    fn poll2(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
+        // This happens outside of the loop to prevent needing to do a clock
+        // check and then comparison of the queue possibly multiple times a
+        // second (and thus, the clock wouldn't have changed enough to matter).
+        self.clear_expired_reset_streams();
+
+        loop {
+            // First, ensure that the `Connection` is able to receive a frame
+            //
+            // The order here matters:
+            // - poll_go_away may buffer a graceful shutdown GOAWAY frame
+            // - If it has, we've also added a PING to be sent in poll_ready
+            if let Some(reason) = ready!(self.poll_go_away(cx)?) {
+                if self.inner.go_away.should_close_now() {
+                    if self.inner.go_away.is_user_initiated() {
+                        // A user initiated abrupt shutdown shouldn't return
+                        // the same error back to the user.
+                        return Poll::Ready(Ok(()));
+                    } else {
+                        return Poll::Ready(Err(Error::library_go_away(reason)));
+                    }
+                }
+                // Only NO_ERROR should be waiting for idle
+                debug_assert_eq!(
+                    reason,
+                    Reason::NO_ERROR,
+                    "graceful GOAWAY should be NO_ERROR"
+                );
+            }
+            ready!(self.poll_ready(cx))?;
+
+            match self
+                .inner
+                .as_dyn()
+                .recv_frame(ready!(Pin::new(&mut self.codec).poll_next(cx)?))?
+            {
+                ReceivedFrame::Settings(frame) => {
+                    self.inner.settings.recv_settings(
+                        frame,
+                        &mut self.codec,
+                        &mut self.inner.streams,
+                    )?;
+                }
+                ReceivedFrame::Continue => (),
+                ReceivedFrame::Done => {
+                    return Poll::Ready(Ok(()));
+                }
+            }
+        }
+    }
+
+    fn clear_expired_reset_streams(&mut self) {
+        self.inner.streams.clear_expired_reset_streams();
+    }
+}
+
+impl<P, B> ConnectionInner<P, B>
+where
+    P: Peer,
+    B: Buf,
+{
+    fn as_dyn(&mut self) -> DynConnection<'_, B> {
+        let ConnectionInner {
+            state,
+            go_away,
+            streams,
+            error,
+            ping_pong,
+            ..
+        } = self;
+        let streams = streams.as_dyn();
+        DynConnection {
+            state,
+            go_away,
+            streams,
+            error,
+            ping_pong,
+        }
+    }
+}
+
+impl<B> DynConnection<'_, B>
+where
+    B: Buf,
+{
+    fn go_away(&mut self, id: StreamId, e: Reason) {
+        let frame = frame::GoAway::new(id, e);
+        self.streams.send_go_away(id);
+        self.go_away.go_away(frame);
+    }
+
+    fn go_away_now(&mut self, e: Reason) {
+        let last_processed_id = self.streams.last_processed_id();
+        let frame = frame::GoAway::new(last_processed_id, e);
+        self.go_away.go_away_now(frame);
+    }
+
+    fn go_away_now_data(&mut self, e: Reason, data: Bytes) {
+        let last_processed_id = self.streams.last_processed_id();
+        let frame = frame::GoAway::with_debug_data(last_processed_id, e, data);
+        self.go_away.go_away_now(frame);
+    }
+
+    fn go_away_from_user(&mut self, e: Reason) {
+        let last_processed_id = self.streams.last_processed_id();
+        let frame = frame::GoAway::new(last_processed_id, e);
+        self.go_away.go_away_from_user(frame);
+
+        // Notify all streams of the reason we're abruptly closing.
+        self.streams.handle_error(Error::user_go_away(e));
+    }
+
+    fn handle_poll2_result(&mut self, result: Result<(), Error>) -> Result<(), Error> {
+        match result {
+            // The connection has shutdown normally
+            Ok(()) => {
+                *self.state = State::Closing(Reason::NO_ERROR, Initiator::Library);
+                Ok(())
+            }
+            // Attempting to read a frame resulted in a connection level
+            // error. This is handled by setting a GOAWAY frame followed by
+            // terminating the connection.
+            Err(Error::GoAway(debug_data, reason, initiator)) => {
+                let e = Error::GoAway(debug_data.clone(), reason, initiator);
+                tracing::debug!(error = ?e, "Connection::poll; connection error");
+
+                // We may have already sent a GOAWAY for this error,
+                // if so, don't send another, just flush and close up.
+                if self
+                    .go_away
+                    .going_away()
+                    .map_or(false, |frame| frame.reason() == reason)
+                {
+                    tracing::trace!("    -> already going away");
+                    *self.state = State::Closing(reason, initiator);
+                    return Ok(());
+                }
+
+                // Reset all active streams
+                self.streams.handle_error(e);
+                self.go_away_now_data(reason, debug_data);
+                Ok(())
+            }
+            // Attempting to read a frame resulted in a stream level error.
+            // This is handled by resetting the frame then trying to read
+            // another frame.
+            Err(Error::Reset(id, reason, initiator)) => {
+                debug_assert_eq!(initiator, Initiator::Library);
+                tracing::trace!(?id, ?reason, "stream error");
+                self.streams.send_reset(id, reason);
+                Ok(())
+            }
+            // Attempting to read a frame resulted in an I/O error. All
+            // active streams must be reset.
+            //
+            // TODO: Are I/O errors recoverable?
+            Err(Error::Io(kind, inner)) => {
+                tracing::debug!(error = ?kind, "Connection::poll; IO error");
+                let e = Error::Io(kind, inner);
+
+                // Reset all active streams
+                self.streams.handle_error(e.clone());
+
+                // Some client implementations drop the connection without notifying their peer.
+                // Attempting to read after the client has dropped the connection results in UnexpectedEof.
+                // If, as a server, we have nothing more to send, just close the connection
+                // without an error.
+                //
+                // See https://github.com/hyperium/hyper/issues/3427
+                if self.streams.is_server()
+                    && self.streams.is_buffer_empty()
+                    && matches!(kind, io::ErrorKind::UnexpectedEof)
+                {
+                    *self.state = State::Closed(Reason::NO_ERROR, Initiator::Library);
+                    return Ok(());
+                }
+
+                // Return the error
+                Err(e)
+            }
+        }
+    }
+
+    fn recv_frame(&mut self, frame: Option<Frame>) -> Result<ReceivedFrame, Error> {
+        use crate::frame::Frame::*;
+        match frame {
+            Some(Headers(frame)) => {
+                tracing::trace!(?frame, "recv HEADERS");
+                self.streams.recv_headers(frame)?;
+            }
+            Some(Data(frame)) => {
+                tracing::trace!(?frame, "recv DATA");
+                self.streams.recv_data(frame)?;
+            }
+            Some(Reset(frame)) => {
+                tracing::trace!(?frame, "recv RST_STREAM");
+                self.streams.recv_reset(frame)?;
+            }
+            Some(PushPromise(frame)) => {
+                tracing::trace!(?frame, "recv PUSH_PROMISE");
+                self.streams.recv_push_promise(frame)?;
+            }
+            Some(Settings(frame)) => {
+                tracing::trace!(?frame, "recv SETTINGS");
+                return Ok(ReceivedFrame::Settings(frame));
+            }
+            Some(GoAway(frame)) => {
+                tracing::trace!(?frame, "recv GOAWAY");
+                // This should prevent starting new streams,
+                // but should allow continuing to process current streams
+                // until they are all EOS. Once they are, State should
+                // transition to GoAway.
+                self.streams.recv_go_away(&frame)?;
+                *self.error = Some(frame);
+            }
+            Some(Ping(frame)) => {
+                tracing::trace!(?frame, "recv PING");
+                let status = self.ping_pong.recv_ping(frame);
+                if status.is_shutdown() {
+                    assert!(
+                        self.go_away.is_going_away(),
+                        "received unexpected shutdown ping"
+                    );
+
+                    let last_processed_id = self.streams.last_processed_id();
+                    self.go_away(last_processed_id, Reason::NO_ERROR);
+                }
+            }
+            Some(WindowUpdate(frame)) => {
+                tracing::trace!(?frame, "recv WINDOW_UPDATE");
+                self.streams.recv_window_update(frame)?;
+            }
+            Some(Priority(frame)) => {
+                tracing::trace!(?frame, "recv PRIORITY");
+                // TODO: handle
+            }
+            None => {
+                tracing::trace!("codec closed");
+                self.streams.recv_eof(false).expect("mutex poisoned");
+                return Ok(ReceivedFrame::Done);
+            }
+        }
+        Ok(ReceivedFrame::Continue)
+    }
+}
+
+enum ReceivedFrame {
+    Settings(frame::Settings),
+    Continue,
+    Done,
+}
+
+impl<T, B> Connection<T, client::Peer, B>
+where
+    T: AsyncRead + AsyncWrite,
+    B: Buf,
+{
+    pub(crate) fn streams(&self) -> &Streams<B, client::Peer> {
+        &self.inner.streams
+    }
+}
+
+impl<T, B> Connection<T, server::Peer, B>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+    B: Buf,
+{
+    pub fn next_incoming(&mut self) -> Option<StreamRef<B>> {
+        self.inner.streams.next_incoming()
+    }
+
+    // Graceful shutdown only makes sense for server peers.
+    pub fn go_away_gracefully(&mut self) {
+        if self.inner.go_away.is_going_away() {
+            // No reason to start a new one.
+            return;
+        }
+
+        // According to http://httpwg.org/specs/rfc7540.html#GOAWAY:
+        //
+        // > A server that is attempting to gracefully shut down a connection
+        // > SHOULD send an initial GOAWAY frame with the last stream
+        // > identifier set to 2^31-1 and a NO_ERROR code. This signals to the
+        // > client that a shutdown is imminent and that initiating further
+        // > requests is prohibited. After allowing time for any in-flight
+        // > stream creation (at least one round-trip time), the server can
+        // > send another GOAWAY frame with an updated last stream identifier.
+        // > This ensures that a connection can be cleanly shut down without
+        // > losing requests.
+        self.inner.as_dyn().go_away(StreamId::MAX, Reason::NO_ERROR);
+
+        // We take the advice of waiting 1 RTT literally, and wait
+        // for a pong before proceeding.
+        self.inner.ping_pong.ping_shutdown();
+    }
+}
+
+impl<T, P, B> Drop for Connection<T, P, B>
+where
+    P: Peer,
+    B: Buf,
+{
+    fn drop(&mut self) {
+        // Ignore errors as this indicates that the mutex is poisoned.
+        let _ = self.inner.streams.recv_eof(true);
+    }
+}
diff --git a/src/proto/error.rs b/src/proto/error.rs
new file mode 100644
index 0000000..ad02331
--- /dev/null
+++ b/src/proto/error.rs
@@ -0,0 +1,91 @@
+use crate::codec::SendError;
+use crate::frame::{Reason, StreamId};
+
+use bytes::Bytes;
+use std::fmt;
+use std::io;
+
+/// Either an H2 reason or an I/O error
+#[derive(Clone, Debug)]
+pub enum Error {
+    Reset(StreamId, Reason, Initiator),
+    GoAway(Bytes, Reason, Initiator),
+    Io(io::ErrorKind, Option<String>),
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Initiator {
+    User,
+    Library,
+    Remote,
+}
+
+impl Error {
+    pub(crate) fn is_local(&self) -> bool {
+        match *self {
+            Self::Reset(_, _, initiator) | Self::GoAway(_, _, initiator) => initiator.is_local(),
+            Self::Io(..) => true,
+        }
+    }
+
+    pub(crate) fn user_go_away(reason: Reason) -> Self {
+        Self::GoAway(Bytes::new(), reason, Initiator::User)
+    }
+
+    pub(crate) fn library_reset(stream_id: StreamId, reason: Reason) -> Self {
+        Self::Reset(stream_id, reason, Initiator::Library)
+    }
+
+    pub(crate) fn library_go_away(reason: Reason) -> Self {
+        Self::GoAway(Bytes::new(), reason, Initiator::Library)
+    }
+
+    pub(crate) fn library_go_away_data(reason: Reason, debug_data: impl Into<Bytes>) -> Self {
+        Self::GoAway(debug_data.into(), reason, Initiator::Library)
+    }
+
+    pub(crate) fn remote_reset(stream_id: StreamId, reason: Reason) -> Self {
+        Self::Reset(stream_id, reason, Initiator::Remote)
+    }
+
+    pub(crate) fn remote_go_away(debug_data: Bytes, reason: Reason) -> Self {
+        Self::GoAway(debug_data, reason, Initiator::Remote)
+    }
+}
+
+impl Initiator {
+    fn is_local(&self) -> bool {
+        match *self {
+            Self::User | Self::Library => true,
+            Self::Remote => false,
+        }
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Self::Reset(_, reason, _) | Self::GoAway(_, reason, _) => reason.fmt(fmt),
+            Self::Io(_, Some(ref inner)) => inner.fmt(fmt),
+            Self::Io(kind, None) => io::Error::from(kind).fmt(fmt),
+        }
+    }
+}
+
+impl From<io::ErrorKind> for Error {
+    fn from(src: io::ErrorKind) -> Self {
+        Error::Io(src, None)
+    }
+}
+
+impl From<io::Error> for Error {
+    fn from(src: io::Error) -> Self {
+        Error::Io(src.kind(), src.get_ref().map(|inner| inner.to_string()))
+    }
+}
+
+impl From<Error> for SendError {
+    fn from(src: Error) -> Self {
+        Self::Connection(src)
+    }
+}
diff --git a/src/proto/go_away.rs b/src/proto/go_away.rs
new file mode 100644
index 0000000..d52252c
--- /dev/null
+++ b/src/proto/go_away.rs
@@ -0,0 +1,154 @@
+use crate::codec::Codec;
+use crate::frame::{self, Reason, StreamId};
+
+use bytes::Buf;
+use std::io;
+use std::task::{Context, Poll};
+use tokio::io::AsyncWrite;
+
+/// Manages our sending of GOAWAY frames.
+#[derive(Debug)]
+pub(super) struct GoAway {
+    /// Whether the connection should close now, or wait until idle.
+    close_now: bool,
+    /// Records if we've sent any GOAWAY before.
+    going_away: Option<GoingAway>,
+    /// Whether the user started the GOAWAY by calling `abrupt_shutdown`.
+    is_user_initiated: bool,
+    /// A GOAWAY frame that must be buffered in the Codec immediately.
+    pending: Option<frame::GoAway>,
+}
+
+/// Keeps a memory of any GOAWAY frames we've sent before.
+///
+/// This looks very similar to a `frame::GoAway`, but is a separate type. Why?
+/// Mostly for documentation purposes. This type is to record status. If it
+/// were a `frame::GoAway`, it might appear like we eventually wanted to
+/// serialize it. We **only** want to be able to look up these fields at a
+/// later time.
+#[derive(Debug)]
+pub(crate) struct GoingAway {
+    /// Stores the highest stream ID of a GOAWAY that has been sent.
+    ///
+    /// It's illegal to send a subsequent GOAWAY with a higher ID.
+    last_processed_id: StreamId,
+
+    /// Records the error code of any GOAWAY frame sent.
+    reason: Reason,
+}
+
+impl GoAway {
+    pub fn new() -> Self {
+        GoAway {
+            close_now: false,
+            going_away: None,
+            is_user_initiated: false,
+            pending: None,
+        }
+    }
+
+    /// Enqueue a GOAWAY frame to be written.
+    ///
+    /// The connection is expected to continue to run until idle.
+    pub fn go_away(&mut self, f: frame::GoAway) {
+        if let Some(ref going_away) = self.going_away {
+            assert!(
+                f.last_stream_id() <= going_away.last_processed_id,
+                "GOAWAY stream IDs shouldn't be higher; \
+                 last_processed_id = {:?}, f.last_stream_id() = {:?}",
+                going_away.last_processed_id,
+                f.last_stream_id(),
+            );
+        }
+
+        self.going_away = Some(GoingAway {
+            last_processed_id: f.last_stream_id(),
+            reason: f.reason(),
+        });
+        self.pending = Some(f);
+    }
+
+    pub fn go_away_now(&mut self, f: frame::GoAway) {
+        self.close_now = true;
+        if let Some(ref going_away) = self.going_away {
+            // Prevent sending the same GOAWAY twice.
+            if going_away.last_processed_id == f.last_stream_id() && going_away.reason == f.reason()
+            {
+                return;
+            }
+        }
+        self.go_away(f);
+    }
+
+    pub fn go_away_from_user(&mut self, f: frame::GoAway) {
+        self.is_user_initiated = true;
+        self.go_away_now(f);
+    }
+
+    /// Return if a GOAWAY has ever been scheduled.
+    pub fn is_going_away(&self) -> bool {
+        self.going_away.is_some()
+    }
+
+    pub fn is_user_initiated(&self) -> bool {
+        self.is_user_initiated
+    }
+
+    /// Returns the going away info, if any.
+    pub fn going_away(&self) -> Option<&GoingAway> {
+        self.going_away.as_ref()
+    }
+
+    /// Returns if the connection should close now, or wait until idle.
+    pub fn should_close_now(&self) -> bool {
+        self.pending.is_none() && self.close_now
+    }
+
+    /// Returns if the connection should be closed when idle.
+    pub fn should_close_on_idle(&self) -> bool {
+        !self.close_now
+            && self
+                .going_away
+                .as_ref()
+                .map(|g| g.last_processed_id != StreamId::MAX)
+                .unwrap_or(false)
+    }
+
+    /// Try to write a pending GOAWAY frame to the buffer.
+    ///
+    /// If a frame is written, the `Reason` of the GOAWAY is returned.
+    pub fn send_pending_go_away<T, B>(
+        &mut self,
+        cx: &mut Context,
+        dst: &mut Codec<T, B>,
+    ) -> Poll<Option<io::Result<Reason>>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        if let Some(frame) = self.pending.take() {
+            if !dst.poll_ready(cx)?.is_ready() {
+                self.pending = Some(frame);
+                return Poll::Pending;
+            }
+
+            let reason = frame.reason();
+            dst.buffer(frame.into()).expect("invalid GOAWAY frame");
+
+            return Poll::Ready(Some(Ok(reason)));
+        } else if self.should_close_now() {
+            return match self.going_away().map(|going_away| going_away.reason) {
+                Some(reason) => Poll::Ready(Some(Ok(reason))),
+                None => Poll::Ready(None),
+            };
+        }
+
+        Poll::Ready(None)
+    }
+}
+
+impl GoingAway {
+    pub(crate) fn reason(&self) -> Reason {
+        self.reason
+    }
+}
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
new file mode 100644
index 0000000..5609275
--- /dev/null
+++ b/src/proto/mod.rs
@@ -0,0 +1,38 @@
+mod connection;
+mod error;
+mod go_away;
+mod peer;
+mod ping_pong;
+mod settings;
+mod streams;
+
+pub(crate) use self::connection::{Config, Connection};
+pub use self::error::{Error, Initiator};
+pub(crate) use self::peer::{Dyn as DynPeer, Peer};
+pub(crate) use self::ping_pong::UserPings;
+pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams};
+pub(crate) use self::streams::{Open, PollReset, Prioritized};
+
+use crate::codec::Codec;
+
+use self::go_away::GoAway;
+use self::ping_pong::PingPong;
+use self::settings::Settings;
+
+use crate::frame::{self, Frame};
+
+use bytes::Buf;
+
+use tokio::io::AsyncWrite;
+
+pub type PingPayload = [u8; 8];
+
+pub type WindowSize = u32;
+
+// Constants
+pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; // i32::MAX as u32
+pub const DEFAULT_REMOTE_RESET_STREAM_MAX: usize = 20;
+pub const DEFAULT_LOCAL_RESET_COUNT_MAX: usize = 1024;
+pub const DEFAULT_RESET_STREAM_MAX: usize = 10;
+pub const DEFAULT_RESET_STREAM_SECS: u64 = 30;
+pub const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400;
diff --git a/src/proto/peer.rs b/src/proto/peer.rs
new file mode 100644
index 0000000..cbe7fb2
--- /dev/null
+++ b/src/proto/peer.rs
@@ -0,0 +1,95 @@
+use crate::error::Reason;
+use crate::frame::{Pseudo, StreamId};
+use crate::proto::{Error, Open};
+
+use http::{HeaderMap, Request, Response};
+
+use std::fmt;
+
+/// Either a Client or a Server
+pub(crate) trait Peer {
+    /// Message type polled from the transport
+    type Poll: fmt::Debug;
+    const NAME: &'static str;
+
+    fn r#dyn() -> Dyn;
+
+    //fn is_server() -> bool;
+
+    fn convert_poll_message(
+        pseudo: Pseudo,
+        fields: HeaderMap,
+        stream_id: StreamId,
+    ) -> Result<Self::Poll, Error>;
+
+    /*
+    fn is_local_init(id: StreamId) -> bool {
+        assert!(!id.is_zero());
+        Self::is_server() == id.is_server_initiated()
+    }
+    */
+}
+
+/// A dynamic representation of `Peer`.
+///
+/// This is used internally to avoid incurring a generic on all internal types.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub(crate) enum Dyn {
+    Client,
+    Server,
+}
+
+#[derive(Debug)]
+pub enum PollMessage {
+    Client(Response<()>),
+    Server(Request<()>),
+}
+
+// ===== impl Dyn =====
+
+impl Dyn {
+    pub fn is_server(&self) -> bool {
+        *self == Dyn::Server
+    }
+
+    pub fn is_local_init(&self, id: StreamId) -> bool {
+        assert!(!id.is_zero());
+        self.is_server() == id.is_server_initiated()
+    }
+
+    pub fn convert_poll_message(
+        &self,
+        pseudo: Pseudo,
+        fields: HeaderMap,
+        stream_id: StreamId,
+    ) -> Result<PollMessage, Error> {
+        if self.is_server() {
+            crate::server::Peer::convert_poll_message(pseudo, fields, stream_id)
+                .map(PollMessage::Server)
+        } else {
+            crate::client::Peer::convert_poll_message(pseudo, fields, stream_id)
+                .map(PollMessage::Client)
+        }
+    }
+
+    /// Returns true if the remote peer can initiate a stream with the given ID.
+    pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), Error> {
+        if self.is_server() {
+            // Ensure that the ID is a valid client initiated ID
+            if mode.is_push_promise() || !id.is_client_initiated() {
+                proto_err!(conn: "cannot open stream {:?} - not client initiated", id);
+                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+            }
+
+            Ok(())
+        } else {
+            // Ensure that the ID is a valid server initiated ID
+            if !mode.is_push_promise() || !id.is_server_initiated() {
+                proto_err!(conn: "cannot open stream {:?} - not server initiated", id);
+                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+            }
+
+            Ok(())
+        }
+    }
+}
diff --git a/src/proto/ping_pong.rs b/src/proto/ping_pong.rs
new file mode 100644
index 0000000..59023e2
--- /dev/null
+++ b/src/proto/ping_pong.rs
@@ -0,0 +1,291 @@
+use crate::codec::Codec;
+use crate::frame::Ping;
+use crate::proto::{self, PingPayload};
+
+use bytes::Buf;
+use futures_util::task::AtomicWaker;
+use std::io;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use tokio::io::AsyncWrite;
+
+/// Acknowledges ping requests from the remote.
+#[derive(Debug)]
+pub(crate) struct PingPong {
+    pending_ping: Option<PendingPing>,
+    pending_pong: Option<PingPayload>,
+    user_pings: Option<UserPingsRx>,
+}
+
+#[derive(Debug)]
+pub(crate) struct UserPings(Arc<UserPingsInner>);
+
+#[derive(Debug)]
+struct UserPingsRx(Arc<UserPingsInner>);
+
+#[derive(Debug)]
+struct UserPingsInner {
+    state: AtomicUsize,
+    /// Task to wake up the main `Connection`.
+    ping_task: AtomicWaker,
+    /// Task to wake up `share::PingPong::poll_pong`.
+    pong_task: AtomicWaker,
+}
+
+#[derive(Debug)]
+struct PendingPing {
+    payload: PingPayload,
+    sent: bool,
+}
+
+/// Status returned from `PingPong::recv_ping`.
+#[derive(Debug)]
+pub(crate) enum ReceivedPing {
+    MustAck,
+    Unknown,
+    Shutdown,
+}
+
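+// A user-initiated ping walks through these states:
+// EMPTY -> PENDING_PING (`send_ping` called) -> PENDING_PONG (PING frame
+// written) -> RECEIVED_PONG (ack received) -> EMPTY (`poll_pong` observed
+// it). CLOSED is terminal; it is set when the connection's receiver half
+// (`UserPingsRx`) is dropped.
+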
+/// No user ping pending.
+const USER_STATE_EMPTY: usize = 0;
+/// User has called `send_ping`, but PING hasn't been written yet.
+const USER_STATE_PENDING_PING: usize = 1;
+/// User PING has been written, waiting for PONG.
+const USER_STATE_PENDING_PONG: usize = 2;
+/// We've received user PONG, waiting for user to `poll_pong`.
+const USER_STATE_RECEIVED_PONG: usize = 3;
+/// The connection is closed.
+const USER_STATE_CLOSED: usize = 4;
+
+// ===== impl PingPong =====
+
+impl PingPong {
+    pub(crate) fn new() -> Self {
+        PingPong {
+            pending_ping: None,
+            pending_pong: None,
+            user_pings: None,
+        }
+    }
+
+    /// Can only be called once. If called a second time, returns `None`.
+    pub(crate) fn take_user_pings(&mut self) -> Option<UserPings> {
+        if self.user_pings.is_some() {
+            return None;
+        }
+
+        let user_pings = Arc::new(UserPingsInner {
+            state: AtomicUsize::new(USER_STATE_EMPTY),
+            ping_task: AtomicWaker::new(),
+            pong_task: AtomicWaker::new(),
+        });
+        self.user_pings = Some(UserPingsRx(user_pings.clone()));
+        Some(UserPings(user_pings))
+    }
+
+    pub(crate) fn ping_shutdown(&mut self) {
+        assert!(self.pending_ping.is_none());
+
+        self.pending_ping = Some(PendingPing {
+            payload: Ping::SHUTDOWN,
+            sent: false,
+        });
+    }
+
+    /// Process a ping
+    pub(crate) fn recv_ping(&mut self, ping: Ping) -> ReceivedPing {
+        // The caller should always check that `send_pending_pong` returned ready
+        // before calling `recv_ping`.
+        assert!(self.pending_pong.is_none());
+
+        if ping.is_ack() {
+            if let Some(pending) = self.pending_ping.take() {
+                if &pending.payload == ping.payload() {
+                    assert_eq!(
+                        &pending.payload,
+                        &Ping::SHUTDOWN,
+                        "pending_ping should be for shutdown",
+                    );
+                    tracing::trace!("recv PING SHUTDOWN ack");
+                    return ReceivedPing::Shutdown;
+                }
+
+                // if not the payload we expected, put it back.
+                self.pending_ping = Some(pending);
+            }
+
+            if let Some(ref users) = self.user_pings {
+                if ping.payload() == &Ping::USER && users.receive_pong() {
+                    tracing::trace!("recv PING USER ack");
+                    return ReceivedPing::Unknown;
+                }
+            }
+
+            // else we received an ack for a ping we never sent?
+            // The spec doesn't require us to do anything about this,
+            // so for resiliency, just ignore it for now.
+            tracing::warn!("recv PING ack that we never sent: {:?}", ping);
+            ReceivedPing::Unknown
+        } else {
+            // Save the ping's payload to be sent as an acknowledgement.
+            self.pending_pong = Some(ping.into_payload());
+            ReceivedPing::MustAck
+        }
+    }
+
+    /// Send any pending pongs.
+    pub(crate) fn send_pending_pong<T, B>(
+        &mut self,
+        cx: &mut Context,
+        dst: &mut Codec<T, B>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        if let Some(pong) = self.pending_pong.take() {
+            if !dst.poll_ready(cx)?.is_ready() {
+                self.pending_pong = Some(pong);
+                return Poll::Pending;
+            }
+
+            dst.buffer(Ping::pong(pong).into())
+                .expect("invalid pong frame");
+        }
+
+        Poll::Ready(Ok(()))
+    }
+
+    /// Send any pending pings.
+    pub(crate) fn send_pending_ping<T, B>(
+        &mut self,
+        cx: &mut Context,
+        dst: &mut Codec<T, B>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        if let Some(ref mut ping) = self.pending_ping {
+            if !ping.sent {
+                if !dst.poll_ready(cx)?.is_ready() {
+                    return Poll::Pending;
+                }
+
+                dst.buffer(Ping::new(ping.payload).into())
+                    .expect("invalid ping frame");
+                ping.sent = true;
+            }
+        } else if let Some(ref users) = self.user_pings {
+            if users.0.state.load(Ordering::Acquire) == USER_STATE_PENDING_PING {
+                if !dst.poll_ready(cx)?.is_ready() {
+                    return Poll::Pending;
+                }
+
+                dst.buffer(Ping::new(Ping::USER).into())
+                    .expect("invalid ping frame");
+                users
+                    .0
+                    .state
+                    .store(USER_STATE_PENDING_PONG, Ordering::Release);
+            } else {
+                users.0.ping_task.register(cx.waker());
+            }
+        }
+
+        Poll::Ready(Ok(()))
+    }
+}
+
+impl ReceivedPing {
+    pub(crate) fn is_shutdown(&self) -> bool {
+        matches!(*self, Self::Shutdown)
+    }
+}
+
+// ===== impl UserPings =====
+
+impl UserPings {
+    pub(crate) fn send_ping(&self) -> Result<(), Option<proto::Error>> {
+        let prev = self
+            .0
+            .state
+            .compare_exchange(
+                USER_STATE_EMPTY,        // current
+                USER_STATE_PENDING_PING, // new
+                Ordering::AcqRel,
+                Ordering::Acquire,
+            )
+            .unwrap_or_else(|v| v);
+
+        match prev {
+            USER_STATE_EMPTY => {
+                self.0.ping_task.wake();
+                Ok(())
+            }
+            USER_STATE_CLOSED => Err(Some(broken_pipe().into())),
+            _ => {
+                // Was already pending, user error!
+                Err(None)
+            }
+        }
+    }
+
+    pub(crate) fn poll_pong(&self, cx: &mut Context) -> Poll<Result<(), proto::Error>> {
+        // Must register before checking state, in case state were to change
+        // before we could register, and then the ping would just be lost.
+        self.0.pong_task.register(cx.waker());
+        let prev = self
+            .0
+            .state
+            .compare_exchange(
+                USER_STATE_RECEIVED_PONG, // current
+                USER_STATE_EMPTY,         // new
+                Ordering::AcqRel,
+                Ordering::Acquire,
+            )
+            .unwrap_or_else(|v| v);
+
+        match prev {
+            USER_STATE_RECEIVED_PONG => Poll::Ready(Ok(())),
+            USER_STATE_CLOSED => Poll::Ready(Err(broken_pipe().into())),
+            _ => Poll::Pending,
+        }
+    }
+}
+
+// ===== impl UserPingsRx =====
+
+impl UserPingsRx {
+    fn receive_pong(&self) -> bool {
+        let prev = self
+            .0
+            .state
+            .compare_exchange(
+                USER_STATE_PENDING_PONG,  // current
+                USER_STATE_RECEIVED_PONG, // new
+                Ordering::AcqRel,
+                Ordering::Acquire,
+            )
+            .unwrap_or_else(|v| v);
+
+        if prev == USER_STATE_PENDING_PONG {
+            self.0.pong_task.wake();
+            true
+        } else {
+            false
+        }
+    }
+}
+
+impl Drop for UserPingsRx {
+    fn drop(&mut self) {
+        self.0.state.store(USER_STATE_CLOSED, Ordering::Release);
+        self.0.pong_task.wake();
+    }
+}
+
+fn broken_pipe() -> io::Error {
+    io::ErrorKind::BrokenPipe.into()
+}
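+
+// A minimal, self-contained sketch of the lock-free hand-off implemented by
+// `UserPings`/`UserPingsRx` above. The constants and helper logic here are
+// illustrative stand-ins rather than the crate's values; the point is how
+// `compare_exchange` makes each transition exclusive, so a user ping can be
+// requested at most once at a time and its pong is observed exactly once.
+#[test]
+fn sanity_user_ping_state_machine() {
+    use std::sync::atomic::{AtomicUsize, Ordering};
+
+    // Local stand-ins for the USER_STATE_* constants above.
+    const EMPTY: usize = 0; // no user ping in flight
+    const PENDING_PING: usize = 1; // ping requested, not yet written
+    const PENDING_PONG: usize = 2; // ping written, waiting for the ACK
+    const RECEIVED_PONG: usize = 3; // ACK seen, user not yet notified
+
+    let state = AtomicUsize::new(EMPTY);
+
+    // Only one user ping may be in flight: EMPTY -> PENDING_PING succeeds once.
+    assert!(state
+        .compare_exchange(EMPTY, PENDING_PING, Ordering::AcqRel, Ordering::Acquire)
+        .is_ok());
+    assert!(state
+        .compare_exchange(EMPTY, PENDING_PING, Ordering::AcqRel, Ordering::Acquire)
+        .is_err());
+
+    // The connection task writes the PING frame and waits for the ACK.
+    state.store(PENDING_PONG, Ordering::Release);
+
+    // The ACK is claimed exactly once: PENDING_PONG -> RECEIVED_PONG.
+    assert!(state
+        .compare_exchange(PENDING_PONG, RECEIVED_PONG, Ordering::AcqRel, Ordering::Acquire)
+        .is_ok());
+    assert!(state
+        .compare_exchange(PENDING_PONG, RECEIVED_PONG, Ordering::AcqRel, Ordering::Acquire)
+        .is_err());
+}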
diff --git a/src/proto/settings.rs b/src/proto/settings.rs
new file mode 100644
index 0000000..d6155fc
--- /dev/null
+++ b/src/proto/settings.rs
@@ -0,0 +1,168 @@
+use crate::codec::UserError;
+use crate::error::Reason;
+use crate::proto::*;
+use std::task::{Context, Poll};
+
+#[derive(Debug)]
+pub(crate) struct Settings {
+    /// Our local SETTINGS sync state with the remote.
+    local: Local,
+    /// Received SETTINGS frame pending processing. The ACK must be written to
+    /// the socket first, and then the settings applied, **before** receiving
+    /// any further frames.
+    remote: Option<frame::Settings>,
+    /// Whether the connection has received the initial SETTINGS frame from the
+    /// remote peer.
+    has_received_remote_initial_settings: bool,
+}
+
+#[derive(Debug)]
+enum Local {
+    /// We want to send these SETTINGS to the remote when the socket is ready.
+    ToSend(frame::Settings),
+    /// We have sent these SETTINGS and are waiting for the remote to ACK
+    /// before we apply them.
+    WaitingAck(frame::Settings),
+    /// Our local settings are in sync with the remote.
+    Synced,
+}
+
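+// A minimal sketch of the local SETTINGS lifecycle described above, using a
+// stand-in enum (not the crate's `Local`) so it runs on its own: a new frame
+// is only accepted while synced, and it moves through "to send" and "waiting
+// for ACK" before the settings may be applied.
+#[test]
+fn sanity_local_settings_lifecycle() {
+    enum LocalSketch {
+        ToSend,
+        WaitingAck,
+        Synced,
+    }
+
+    fn queue(state: &mut LocalSketch) -> Result<(), &'static str> {
+        match *state {
+            LocalSketch::Synced => {
+                *state = LocalSketch::ToSend;
+                Ok(())
+            }
+            _ => Err("a SETTINGS frame is already pending"),
+        }
+    }
+
+    let mut state = LocalSketch::Synced;
+    assert!(queue(&mut state).is_ok()); // Synced -> ToSend
+    state = LocalSketch::WaitingAck; // the frame was flushed to the socket
+    assert!(queue(&mut state).is_err()); // rejected until the peer ACKs
+    state = LocalSketch::Synced; // ACK received and the settings applied
+    assert!(queue(&mut state).is_ok());
+}
+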
+impl Settings {
+    pub(crate) fn new(local: frame::Settings) -> Self {
+        Settings {
+            // We assume the initial local SETTINGS were flushed during
+            // the handshake process.
+            local: Local::WaitingAck(local),
+            remote: None,
+            has_received_remote_initial_settings: false,
+        }
+    }
+
+    pub(crate) fn recv_settings<T, B, C, P>(
+        &mut self,
+        frame: frame::Settings,
+        codec: &mut Codec<T, B>,
+        streams: &mut Streams<C, P>,
+    ) -> Result<(), Error>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+        C: Buf,
+        P: Peer,
+    {
+        if frame.is_ack() {
+            match &self.local {
+                Local::WaitingAck(local) => {
+                    tracing::debug!("received settings ACK; applying {:?}", local);
+
+                    if let Some(max) = local.max_frame_size() {
+                        codec.set_max_recv_frame_size(max as usize);
+                    }
+
+                    if let Some(max) = local.max_header_list_size() {
+                        codec.set_max_recv_header_list_size(max as usize);
+                    }
+
+                    if let Some(val) = local.header_table_size() {
+                        codec.set_recv_header_table_size(val as usize);
+                    }
+
+                    streams.apply_local_settings(local)?;
+                    self.local = Local::Synced;
+                    Ok(())
+                }
+                Local::ToSend(..) | Local::Synced => {
+                    // We haven't sent any SETTINGS frames to be ACKed, so
+                    // this is very bizarre! Remote is either buggy or malicious.
+                    proto_err!(conn: "received unexpected settings ack");
+                    Err(Error::library_go_away(Reason::PROTOCOL_ERROR))
+                }
+            }
+        } else {
+            // We always ACK before reading more frames, so `remote` should
+            // always be none!
+            assert!(self.remote.is_none());
+            self.remote = Some(frame);
+            Ok(())
+        }
+    }
+
+    pub(crate) fn send_settings(&mut self, frame: frame::Settings) -> Result<(), UserError> {
+        assert!(!frame.is_ack());
+        match &self.local {
+            Local::ToSend(..) | Local::WaitingAck(..) => Err(UserError::SendSettingsWhilePending),
+            Local::Synced => {
+                tracing::trace!("queue to send local settings: {:?}", frame);
+                self.local = Local::ToSend(frame);
+                Ok(())
+            }
+        }
+    }
+
+    /// Sets `self.has_received_remote_initial_settings` to `true`.
+    /// Returns `true` if this method is called for the first time,
+    /// i.e. the frame being processed is the initial SETTINGS frame sent by
+    /// the remote peer.
+    fn mark_remote_initial_settings_as_received(&mut self) -> bool {
+        let has_received = self.has_received_remote_initial_settings;
+        self.has_received_remote_initial_settings = true;
+        !has_received
+    }
+
+    pub(crate) fn poll_send<T, B, C, P>(
+        &mut self,
+        cx: &mut Context,
+        dst: &mut Codec<T, B>,
+        streams: &mut Streams<C, P>,
+    ) -> Poll<Result<(), Error>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+        C: Buf,
+        P: Peer,
+    {
+        if let Some(settings) = self.remote.clone() {
+            if !dst.poll_ready(cx)?.is_ready() {
+                return Poll::Pending;
+            }
+
+            // Create an ACK settings frame
+            let frame = frame::Settings::ack();
+
+            // Buffer the settings frame
+            dst.buffer(frame.into()).expect("invalid settings frame");
+
+            tracing::trace!("ACK sent; applying settings");
+
+            let is_initial = self.mark_remote_initial_settings_as_received();
+            streams.apply_remote_settings(&settings, is_initial)?;
+
+            if let Some(val) = settings.header_table_size() {
+                dst.set_send_header_table_size(val as usize);
+            }
+
+            if let Some(val) = settings.max_frame_size() {
+                dst.set_max_send_frame_size(val as usize);
+            }
+        }
+
+        self.remote = None;
+
+        match &self.local {
+            Local::ToSend(settings) => {
+                if !dst.poll_ready(cx)?.is_ready() {
+                    return Poll::Pending;
+                }
+
+                // Buffer the settings frame
+                dst.buffer(settings.clone().into())
+                    .expect("invalid settings frame");
+                tracing::trace!("local settings sent; waiting for ack: {:?}", settings);
+
+                self.local = Local::WaitingAck(settings.clone());
+            }
+            Local::WaitingAck(..) | Local::Synced => {}
+        }
+
+        Poll::Ready(Ok(()))
+    }
+}
diff --git a/src/proto/streams/buffer.rs b/src/proto/streams/buffer.rs
new file mode 100644
index 0000000..02d2650
--- /dev/null
+++ b/src/proto/streams/buffer.rs
@@ -0,0 +1,99 @@
+use slab::Slab;
+
+/// Buffers frames for multiple streams.
+#[derive(Debug)]
+pub struct Buffer<T> {
+    slab: Slab<Slot<T>>,
+}
+
+/// A sequence of frames in a `Buffer`
+#[derive(Debug)]
+pub struct Deque {
+    indices: Option<Indices>,
+}
+
+/// Tracks the head & tail for a sequence of frames in a `Buffer`.
+#[derive(Debug, Default, Copy, Clone)]
+struct Indices {
+    head: usize,
+    tail: usize,
+}
+
+#[derive(Debug)]
+struct Slot<T> {
+    value: T,
+    next: Option<usize>,
+}
+
+impl<T> Buffer<T> {
+    pub fn new() -> Self {
+        Buffer { slab: Slab::new() }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.slab.is_empty()
+    }
+}
+
+impl Deque {
+    pub fn new() -> Self {
+        Deque { indices: None }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.indices.is_none()
+    }
+
+    pub fn push_back<T>(&mut self, buf: &mut Buffer<T>, value: T) {
+        let key = buf.slab.insert(Slot { value, next: None });
+
+        match self.indices {
+            Some(ref mut idxs) => {
+                buf.slab[idxs.tail].next = Some(key);
+                idxs.tail = key;
+            }
+            None => {
+                self.indices = Some(Indices {
+                    head: key,
+                    tail: key,
+                });
+            }
+        }
+    }
+
+    pub fn push_front<T>(&mut self, buf: &mut Buffer<T>, value: T) {
+        let key = buf.slab.insert(Slot { value, next: None });
+
+        match self.indices {
+            Some(ref mut idxs) => {
+                buf.slab[key].next = Some(idxs.head);
+                idxs.head = key;
+            }
+            None => {
+                self.indices = Some(Indices {
+                    head: key,
+                    tail: key,
+                });
+            }
+        }
+    }
+
+    pub fn pop_front<T>(&mut self, buf: &mut Buffer<T>) -> Option<T> {
+        match self.indices {
+            Some(mut idxs) => {
+                let mut slot = buf.slab.remove(idxs.head);
+
+                if idxs.head == idxs.tail {
+                    assert!(slot.next.is_none());
+                    self.indices = None;
+                } else {
+                    idxs.head = slot.next.take().unwrap();
+                    self.indices = Some(idxs);
+                }
+
+                Some(slot.value)
+            }
+            None => None,
+        }
+    }
+}
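+
+// A self-contained sketch of the layout described above, using the same
+// `slab` crate but plain `u32` values instead of frames: one slab backs every
+// queue, and each queue is just a head/tail pair of slab keys linked through
+// `next`, so per-stream queues need no storage of their own.
+#[test]
+fn sanity_slab_linked_queue() {
+    struct SketchSlot {
+        value: u32,
+        next: Option<usize>,
+    }
+
+    let mut slab: Slab<SketchSlot> = Slab::new();
+
+    // Push two values onto one logical queue.
+    let head = slab.insert(SketchSlot { value: 1, next: None });
+    let tail = slab.insert(SketchSlot { value: 2, next: None });
+    slab[head].next = Some(tail);
+
+    // Pop from the front by removing the head and following its link.
+    let first = slab.remove(head);
+    assert_eq!(first.value, 1);
+    assert_eq!(first.next, Some(tail));
+    assert_eq!(slab.remove(tail).value, 2);
+}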
diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs
new file mode 100644
index 0000000..fdb07f1
--- /dev/null
+++ b/src/proto/streams/counts.rs
@@ -0,0 +1,287 @@
+use super::*;
+
+use std::usize;
+
+#[derive(Debug)]
+pub(super) struct Counts {
+    /// Acting as a client or server. This allows us to track which values to
+    /// inc / dec.
+    peer: peer::Dyn,
+
+    /// Maximum number of locally initiated streams
+    max_send_streams: usize,
+
+    /// Current number of locally initiated streams
+    num_send_streams: usize,
+
+    /// Maximum number of remote initiated streams
+    max_recv_streams: usize,
+
+    /// Current number of remote initiated streams
+    num_recv_streams: usize,
+
+    /// Maximum number of pending locally reset streams
+    max_local_reset_streams: usize,
+
+    /// Current number of pending locally reset streams
+    num_local_reset_streams: usize,
+
+    /// Max number of "pending accept" streams that were remotely reset
+    max_remote_reset_streams: usize,
+
+    /// Current number of "pending accept" streams that were remotely reset
+    num_remote_reset_streams: usize,
+
+    /// Maximum number of locally reset streams due to protocol error across
+    /// the lifetime of the connection.
+    ///
+    /// When this gets exceeded, we issue GOAWAYs.
+    max_local_error_reset_streams: Option<usize>,
+
+    /// Total number of locally reset streams due to protocol error across the
+    /// lifetime of the connection.
+    num_local_error_reset_streams: usize,
+}
+
+impl Counts {
+    /// Create a new `Counts` using the provided configuration values.
+    pub fn new(peer: peer::Dyn, config: &Config) -> Self {
+        Counts {
+            peer,
+            max_send_streams: config.initial_max_send_streams,
+            num_send_streams: 0,
+            max_recv_streams: config.remote_max_initiated.unwrap_or(usize::MAX),
+            num_recv_streams: 0,
+            max_local_reset_streams: config.local_reset_max,
+            num_local_reset_streams: 0,
+            max_remote_reset_streams: config.remote_reset_max,
+            num_remote_reset_streams: 0,
+            max_local_error_reset_streams: config.local_max_error_reset_streams,
+            num_local_error_reset_streams: 0,
+        }
+    }
+
+    /// Returns true when opening the next stream would reach the outbound
+    /// stream capacity.
+    ///
+    /// The number of client send streams is incremented in `prioritize`, so
+    /// `send_request` has to guess whether it should wait before allowing
+    /// another request to be sent.
+    pub fn next_send_stream_will_reach_capacity(&self) -> bool {
+        self.max_send_streams <= (self.num_send_streams + 1)
+    }
+
+    /// Returns the current peer
+    pub fn peer(&self) -> peer::Dyn {
+        self.peer
+    }
+
+    pub fn has_streams(&self) -> bool {
+        self.num_send_streams != 0 || self.num_recv_streams != 0
+    }
+
+    /// Returns true if we can issue another local reset due to protocol error.
+    pub fn can_inc_num_local_error_resets(&self) -> bool {
+        if let Some(max) = self.max_local_error_reset_streams {
+            max > self.num_local_error_reset_streams
+        } else {
+            true
+        }
+    }
+
+    pub fn inc_num_local_error_resets(&mut self) {
+        assert!(self.can_inc_num_local_error_resets());
+
+        // Increment the number of locally reset streams due to protocol error
+        self.num_local_error_reset_streams += 1;
+    }
+
+    pub(crate) fn max_local_error_resets(&self) -> Option<usize> {
+        self.max_local_error_reset_streams
+    }
+
+    /// Returns true if the receive stream concurrency can be incremented
+    pub fn can_inc_num_recv_streams(&self) -> bool {
+        self.max_recv_streams > self.num_recv_streams
+    }
+
+    /// Increments the number of concurrent receive streams.
+    ///
+    /// # Panics
+    ///
+    /// Panics on failure as this should have been validated beforehand.
+    pub fn inc_num_recv_streams(&mut self, stream: &mut store::Ptr) {
+        assert!(self.can_inc_num_recv_streams());
+        assert!(!stream.is_counted);
+
+        // Increment the number of remote initiated streams
+        self.num_recv_streams += 1;
+        stream.is_counted = true;
+    }
+
+    /// Returns true if the send stream concurrency can be incremented
+    pub fn can_inc_num_send_streams(&self) -> bool {
+        self.max_send_streams > self.num_send_streams
+    }
+
+    /// Increments the number of concurrent send streams.
+    ///
+    /// # Panics
+    ///
+    /// Panics on failure as this should have been validated beforehand.
+    pub fn inc_num_send_streams(&mut self, stream: &mut store::Ptr) {
+        assert!(self.can_inc_num_send_streams());
+        assert!(!stream.is_counted);
+
+        // Increment the number of locally initiated streams
+        self.num_send_streams += 1;
+        stream.is_counted = true;
+    }
+
+    /// Returns true if the number of pending reset streams can be incremented.
+    pub fn can_inc_num_reset_streams(&self) -> bool {
+        self.max_local_reset_streams > self.num_local_reset_streams
+    }
+
+    /// Increments the number of pending reset streams.
+    ///
+    /// # Panics
+    ///
+    /// Panics on failure as this should have been validated beforehand.
+    pub fn inc_num_reset_streams(&mut self) {
+        assert!(self.can_inc_num_reset_streams());
+
+        self.num_local_reset_streams += 1;
+    }
+
+    pub(crate) fn max_remote_reset_streams(&self) -> usize {
+        self.max_remote_reset_streams
+    }
+
+    /// Returns true if the number of pending REMOTE reset streams can be
+    /// incremented.
+    pub(crate) fn can_inc_num_remote_reset_streams(&self) -> bool {
+        self.max_remote_reset_streams > self.num_remote_reset_streams
+    }
+
+    /// Increments the number of pending REMOTE reset streams.
+    ///
+    /// # Panics
+    ///
+    /// Panics on failure as this should have been validated beforehand.
+    pub(crate) fn inc_num_remote_reset_streams(&mut self) {
+        assert!(self.can_inc_num_remote_reset_streams());
+
+        self.num_remote_reset_streams += 1;
+    }
+
+    pub(crate) fn dec_num_remote_reset_streams(&mut self) {
+        assert!(self.num_remote_reset_streams > 0);
+
+        self.num_remote_reset_streams -= 1;
+    }
+
+    pub fn apply_remote_settings(&mut self, settings: &frame::Settings, is_initial: bool) {
+        match settings.max_concurrent_streams() {
+            Some(val) => self.max_send_streams = val as usize,
+            None if is_initial => self.max_send_streams = usize::MAX,
+            None => {}
+        }
+    }
+
+    /// Run a block of code that could potentially transition a stream's state.
+    ///
+    /// If the stream state transitions to closed, this function will perform
+    /// all necessary cleanup.
+    ///
+    /// TODO: Is this function still needed?
+    pub fn transition<F, U>(&mut self, mut stream: store::Ptr, f: F) -> U
+    where
+        F: FnOnce(&mut Self, &mut store::Ptr) -> U,
+    {
+        // TODO: Does this need to be computed before performing the action?
+        let is_pending_reset = stream.is_pending_reset_expiration();
+
+        // Run the action
+        let ret = f(self, &mut stream);
+
+        self.transition_after(stream, is_pending_reset);
+
+        ret
+    }
+
+    // TODO: move this to macro?
+    pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) {
+        tracing::trace!(
+            "transition_after; stream={:?}; state={:?}; is_closed={:?}; \
+             pending_send_empty={:?}; buffered_send_data={}; \
+             num_recv={}; num_send={}",
+            stream.id,
+            stream.state,
+            stream.is_closed(),
+            stream.pending_send.is_empty(),
+            stream.buffered_send_data,
+            self.num_recv_streams,
+            self.num_send_streams
+        );
+
+        if stream.is_closed() {
+            if !stream.is_pending_reset_expiration() {
+                stream.unlink();
+                if is_reset_counted {
+                    self.dec_num_reset_streams();
+                }
+            }
+
+            if stream.is_counted {
+                tracing::trace!("dec_num_streams; stream={:?}", stream.id);
+                // Decrement the number of active streams.
+                self.dec_num_streams(&mut stream);
+            }
+        }
+
+        // Release the stream if it requires releasing
+        if stream.is_released() {
+            stream.remove();
+        }
+    }
+
+    /// Returns the maximum number of streams that can be initiated by this
+    /// peer.
+    pub(crate) fn max_send_streams(&self) -> usize {
+        self.max_send_streams
+    }
+
+    /// Returns the maximum number of streams that can be initiated by the
+    /// remote peer.
+    pub(crate) fn max_recv_streams(&self) -> usize {
+        self.max_recv_streams
+    }
+
+    fn dec_num_streams(&mut self, stream: &mut store::Ptr) {
+        assert!(stream.is_counted);
+
+        if self.peer.is_local_init(stream.id) {
+            assert!(self.num_send_streams > 0);
+            self.num_send_streams -= 1;
+            stream.is_counted = false;
+        } else {
+            assert!(self.num_recv_streams > 0);
+            self.num_recv_streams -= 1;
+            stream.is_counted = false;
+        }
+    }
+
+    fn dec_num_reset_streams(&mut self) {
+        assert!(self.num_local_reset_streams > 0);
+        self.num_local_reset_streams -= 1;
+    }
+}
+
+impl Drop for Counts {
+    fn drop(&mut self) {
+        use std::thread;
+
+        if !thread::panicking() {
+            debug_assert!(!self.has_streams());
+        }
+    }
+}
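+
+// A standalone sketch of the `apply_remote_settings` rule above, with a local
+// helper rather than the crate's API: if the peer's *initial* SETTINGS frame
+// omits MAX_CONCURRENT_STREAMS, the limit is treated as unlimited, while a
+// later frame that omits it leaves the previous limit untouched.
+#[test]
+fn sanity_max_concurrent_streams_default() {
+    fn apply(current: &mut usize, advertised: Option<u32>, is_initial: bool) {
+        match advertised {
+            Some(val) => *current = val as usize,
+            None if is_initial => *current = usize::MAX,
+            None => {}
+        }
+    }
+
+    let mut max = 100; // builder default before any SETTINGS arrive
+    apply(&mut max, None, true); // the initial frame omits the setting
+    assert_eq!(max, usize::MAX);
+    apply(&mut max, Some(256), false); // a later frame sets a limit
+    assert_eq!(max, 256);
+    apply(&mut max, None, false); // a later frame that omits it changes nothing
+    assert_eq!(max, 256);
+}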
diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs
new file mode 100644
index 0000000..57a9358
--- /dev/null
+++ b/src/proto/streams/flow_control.rs
@@ -0,0 +1,269 @@
+use crate::frame::Reason;
+use crate::proto::{WindowSize, MAX_WINDOW_SIZE};
+
+use std::fmt;
+
+// We don't want to send WINDOW_UPDATE frames for tiny changes, but instead
+// aggregate them when the changes are significant. Many implementations do
+// this by keeping a "ratio" of the update versus the allowed window size.
+//
+// While some may wish to represent this ratio as a percentage, using an f32,
+// we skip having to deal with float math and stick to integers. To do so,
+// the "ratio" is represented by 2 i32s, split into the numerator and
+// denominator. For example, a 50% ratio is simply represented as 1/2.
+//
+// An example applying this ratio: If a stream has an allowed window size of
+// 100 bytes, WINDOW_UPDATE frames are scheduled when the unclaimed change
+// becomes greater than 1/2, or 50 bytes.
+const UNCLAIMED_NUMERATOR: i32 = 1;
+const UNCLAIMED_DENOMINATOR: i32 = 2;
+
+#[test]
+#[allow(clippy::assertions_on_constants)]
+fn sanity_unclaimed_ratio() {
+    assert!(UNCLAIMED_NUMERATOR < UNCLAIMED_DENOMINATOR);
+    assert!(UNCLAIMED_NUMERATOR >= 0);
+    assert!(UNCLAIMED_DENOMINATOR > 0);
+}
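+
+// A small worked example of the ratio above, using stand-in arithmetic rather
+// than the crate's `FlowControl` type: with a window the peer believes is 100
+// bytes and a 1/2 ratio, no WINDOW_UPDATE is suggested until at least 50
+// bytes of unclaimed capacity have accumulated.
+#[test]
+fn sanity_unclaimed_threshold_example() {
+    fn suggested_update(known_to_peer: i32, available: i32) -> Option<i32> {
+        let unclaimed = available - known_to_peer;
+        let threshold = known_to_peer / UNCLAIMED_DENOMINATOR * UNCLAIMED_NUMERATOR;
+        if unclaimed <= 0 || unclaimed < threshold {
+            None
+        } else {
+            Some(unclaimed)
+        }
+    }
+
+    assert_eq!(suggested_update(100, 120), None); // only 20 bytes unclaimed: wait
+    assert_eq!(suggested_update(100, 160), Some(60)); // 60 >= 50: schedule an update
+}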
+
+#[derive(Copy, Clone, Debug)]
+pub struct FlowControl {
+    /// Window the peer knows about.
+    ///
+    /// This can go negative if a SETTINGS_INITIAL_WINDOW_SIZE is received.
+    ///
+    /// For example, say the peer sends a request and uses 32kb of the window.
+    /// We send a SETTINGS_INITIAL_WINDOW_SIZE of 16kb. The peer has to adjust
+    /// its understanding of the capacity of the window, and that would be:
+    ///
+    /// ```notrust
+    /// default (64kb) - used (32kb) - settings_diff (64kb - 16kb): -16kb
+    /// ```
+    window_size: Window,
+
+    /// Window that we know about.
+    ///
+    /// This can go negative if a user declares a smaller target window than
+    /// the peer knows about.
+    available: Window,
+}
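+
+// A quick standalone check of the arithmetic in the example above: the peer
+// has already used 32 KiB of the 64 KiB default window when we shrink
+// SETTINGS_INITIAL_WINDOW_SIZE to 16 KiB, so its view of the window drops
+// 16 KiB below zero until more data is consumed.
+#[test]
+fn sanity_negative_window_example() {
+    let default_window: i32 = 64 * 1024;
+    let used: i32 = 32 * 1024;
+    let new_initial: i32 = 16 * 1024;
+
+    let settings_diff = default_window - new_initial; // a 48 KiB reduction
+    let adjusted = default_window - used - settings_diff; // the peer's new view
+
+    assert_eq!(adjusted, -16 * 1024);
+}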
+
+impl FlowControl {
+    pub fn new() -> FlowControl {
+        FlowControl {
+            window_size: Window(0),
+            available: Window(0),
+        }
+    }
+
+    /// Returns the window size as known by the peer
+    pub fn window_size(&self) -> WindowSize {
+        self.window_size.as_size()
+    }
+
+    /// Returns the window size available to the consumer
+    pub fn available(&self) -> Window {
+        self.available
+    }
+
+    /// Returns true if there is unavailable window capacity
+    pub fn has_unavailable(&self) -> bool {
+        if self.window_size < 0 {
+            return false;
+        }
+
+        self.window_size > self.available
+    }
+
+    pub fn claim_capacity(&mut self, capacity: WindowSize) -> Result<(), Reason> {
+        self.available.decrease_by(capacity)
+    }
+
+    pub fn assign_capacity(&mut self, capacity: WindowSize) -> Result<(), Reason> {
+        self.available.increase_by(capacity)
+    }
+
+    /// If a WINDOW_UPDATE frame should be sent, returns a positive number
+    /// representing the increment to be used.
+    ///
+    /// If there are no available bytes to be reclaimed, or the number of
+    /// available bytes does not reach the threshold, this returns `None`.
+    ///
+    /// This represents pending outbound WINDOW_UPDATE frames.
+    pub fn unclaimed_capacity(&self) -> Option<WindowSize> {
+        let available = self.available;
+
+        if self.window_size >= available {
+            return None;
+        }
+
+        let unclaimed = available.0 - self.window_size.0;
+        let threshold = self.window_size.0 / UNCLAIMED_DENOMINATOR * UNCLAIMED_NUMERATOR;
+
+        if unclaimed < threshold {
+            None
+        } else {
+            Some(unclaimed as WindowSize)
+        }
+    }
+
+    /// Increase the window size.
+    ///
+    /// This is called after receiving a WINDOW_UPDATE frame
+    pub fn inc_window(&mut self, sz: WindowSize) -> Result<(), Reason> {
+        let (val, overflow) = self.window_size.0.overflowing_add(sz as i32);
+
+        if overflow {
+            return Err(Reason::FLOW_CONTROL_ERROR);
+        }
+
+        if val > MAX_WINDOW_SIZE as i32 {
+            return Err(Reason::FLOW_CONTROL_ERROR);
+        }
+
+        tracing::trace!(
+            "inc_window; sz={}; old={}; new={}",
+            sz,
+            self.window_size,
+            val
+        );
+
+        self.window_size = Window(val);
+        Ok(())
+    }
+
+    /// Decrement the send-side window size.
+    ///
+    /// This is called after receiving a SETTINGS frame with a lower
+    /// INITIAL_WINDOW_SIZE value.
+    pub fn dec_send_window(&mut self, sz: WindowSize) -> Result<(), Reason> {
+        tracing::trace!(
+            "dec_window; sz={}; window={}, available={}",
+            sz,
+            self.window_size,
+            self.available
+        );
+        // Note: this *can* underflow `window_size`; `decrease_by` returns a
+        // flow-control error in that case.
+        self.window_size.decrease_by(sz)?;
+        Ok(())
+    }
+
+    /// Decrement the recv-side window size.
+    ///
+    /// This is called after receiving a SETTINGS ACK frame with a lower
+    /// INITIAL_WINDOW_SIZE value.
+    pub fn dec_recv_window(&mut self, sz: WindowSize) -> Result<(), Reason> {
+        tracing::trace!(
+            "dec_recv_window; sz={}; window={}, available={}",
+            sz,
+            self.window_size,
+            self.available
+        );
+        // This should not be able to overflow `window_size` from the bottom.
+        self.window_size.decrease_by(sz)?;
+        self.available.decrease_by(sz)?;
+        Ok(())
+    }
+
+    /// Decrements the window, reflecting that data has actually been sent. The caller
+    /// must ensure that the window has capacity.
+    pub fn send_data(&mut self, sz: WindowSize) -> Result<(), Reason> {
+        tracing::trace!(
+            "send_data; sz={}; window={}; available={}",
+            sz,
+            self.window_size,
+            self.available
+        );
+
+        // If the send size is zero, it's meaningless to update the flow control window
+        if sz > 0 {
+            // Ensure that the argument is correct
+            assert!(self.window_size.0 >= sz as i32);
+
+            // Update values
+            self.window_size.decrease_by(sz)?;
+            self.available.decrease_by(sz)?;
+        }
+        Ok(())
+    }
+}
+
+/// The current capacity of a flow-controlled Window.
+///
+/// This number can go negative when one side has already used a certain
+/// amount of capacity and the other side then advertises a reduction in size.
+///
+/// This type tries to centralize the knowledge of addition and subtraction
+/// to this capacity, instead of having integer casts throughout the source.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)]
+pub struct Window(i32);
+
+impl Window {
+    pub fn as_size(&self) -> WindowSize {
+        if self.0 < 0 {
+            0
+        } else {
+            self.0 as WindowSize
+        }
+    }
+
+    pub fn checked_size(&self) -> WindowSize {
+        assert!(self.0 >= 0, "negative Window");
+        self.0 as WindowSize
+    }
+
+    pub fn decrease_by(&mut self, other: WindowSize) -> Result<(), Reason> {
+        if let Some(v) = self.0.checked_sub(other as i32) {
+            self.0 = v;
+            Ok(())
+        } else {
+            Err(Reason::FLOW_CONTROL_ERROR)
+        }
+    }
+
+    pub fn increase_by(&mut self, other: WindowSize) -> Result<(), Reason> {
+        let other = self.add(other)?;
+        self.0 = other.0;
+        Ok(())
+    }
+
+    pub fn add(&self, other: WindowSize) -> Result<Self, Reason> {
+        if let Some(v) = self.0.checked_add(other as i32) {
+            Ok(Self(v))
+        } else {
+            Err(Reason::FLOW_CONTROL_ERROR)
+        }
+    }
+}
+
+impl PartialEq<usize> for Window {
+    fn eq(&self, other: &usize) -> bool {
+        if self.0 < 0 {
+            false
+        } else {
+            (self.0 as usize).eq(other)
+        }
+    }
+}
+
+impl PartialOrd<usize> for Window {
+    fn partial_cmp(&self, other: &usize) -> Option<::std::cmp::Ordering> {
+        if self.0 < 0 {
+            Some(::std::cmp::Ordering::Less)
+        } else {
+            (self.0 as usize).partial_cmp(other)
+        }
+    }
+}
+
+impl fmt::Display for Window {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&self.0, f)
+    }
+}
+
+impl From<Window> for isize {
+    fn from(w: Window) -> isize {
+        w.0 as isize
+    }
+}
diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs
new file mode 100644
index 0000000..c4a8323
--- /dev/null
+++ b/src/proto/streams/mod.rs
@@ -0,0 +1,75 @@
+mod buffer;
+mod counts;
+mod flow_control;
+mod prioritize;
+mod recv;
+mod send;
+mod state;
+mod store;
+mod stream;
+#[allow(clippy::module_inception)]
+mod streams;
+
+pub(crate) use self::prioritize::Prioritized;
+pub(crate) use self::recv::Open;
+pub(crate) use self::send::PollReset;
+pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams};
+
+use self::buffer::Buffer;
+use self::counts::Counts;
+use self::flow_control::FlowControl;
+use self::prioritize::Prioritize;
+use self::recv::Recv;
+use self::send::Send;
+use self::state::State;
+use self::store::Store;
+use self::stream::Stream;
+
+use crate::frame::{StreamId, StreamIdOverflow};
+use crate::proto::*;
+
+use bytes::Bytes;
+use std::time::Duration;
+
+#[derive(Debug)]
+pub struct Config {
+    /// Initial maximum number of locally initiated streams.
+    /// After receiving a Settings frame from the remote peer,
+    /// the connection will overwrite this value with the
+    /// MAX_CONCURRENT_STREAMS specified in the frame.
+    pub initial_max_send_streams: usize,
+
+    /// Max amount of DATA bytes to buffer per stream.
+    pub local_max_buffer_size: usize,
+
+    /// The stream ID to start the next local stream with
+    pub local_next_stream_id: StreamId,
+
+    /// If the local peer is willing to receive push promises
+    pub local_push_enabled: bool,
+
+    /// If extended connect protocol is enabled.
+    pub extended_connect_protocol_enabled: bool,
+
+    /// How long a locally reset stream should ignore frames
+    pub local_reset_duration: Duration,
+
+    /// Maximum number of locally reset streams to keep at a time
+    pub local_reset_max: usize,
+
+    /// Maximum number of remotely reset "pending accept" streams to keep at a
+    /// time. Going over this number results in a connection error.
+    pub remote_reset_max: usize,
+
+    /// Initial window size of remote initiated streams
+    pub remote_init_window_sz: WindowSize,
+
+    /// Maximum number of remote initiated streams
+    pub remote_max_initiated: Option<usize>,
+
+    /// Maximum number of locally reset streams due to protocol error across
+    /// the lifetime of the connection.
+    ///
+    /// When this gets exceeded, we issue GOAWAYs.
+    pub local_max_error_reset_streams: Option<usize>,
+}
diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs
new file mode 100644
index 0000000..14b37e2
--- /dev/null
+++ b/src/proto/streams/prioritize.rs
@@ -0,0 +1,940 @@
+use super::store::Resolve;
+use super::*;
+
+use crate::frame::Reason;
+
+use crate::codec::UserError;
+use crate::codec::UserError::*;
+
+use bytes::buf::Take;
+use std::{
+    cmp::{self, Ordering},
+    fmt, io, mem,
+    task::{Context, Poll, Waker},
+};
+
+/// # Warning
+///
+/// Queued streams are ordered by stream ID, as we need to ensure that
+/// lower-numbered streams are sent headers before higher-numbered ones.
+/// This is because "idle" stream IDs – those which have been initiated but
+/// have yet to receive frames – will be implicitly closed on receipt of a
+/// frame on a higher stream ID. If these queues were not ordered by stream
+/// IDs, some mechanism would be necessary to ensure that the lowest-numbered
+/// idle stream is opened first.
+#[derive(Debug)]
+pub(super) struct Prioritize {
+    /// Queue of streams waiting for socket capacity to send a frame.
+    pending_send: store::Queue<stream::NextSend>,
+
+    /// Queue of streams waiting for window capacity to produce data.
+    pending_capacity: store::Queue<stream::NextSendCapacity>,
+
+    /// Streams waiting for capacity due to max concurrency
+    ///
+    /// The `SendRequest` handle is `Clone`. This enables initiating requests
+    /// from many tasks. However, offering this capability while supporting
+    /// backpressure at some level is tricky. If there are many `SendRequest`
+    /// handles and a single stream becomes available, which handle gets
+    /// assigned that stream? Maybe that handle is no longer ready to send a
+    /// request.
+    ///
+    /// The strategy used is to allow each `SendRequest` handle one buffered
+    /// request. A `SendRequest` handle is ready to send a request if it has no
+    /// associated buffered requests. This is the same strategy as `mpsc` in the
+    /// futures library.
+    pending_open: store::Queue<stream::NextOpen>,
+
+    /// Connection level flow control governing sent data
+    flow: FlowControl,
+
+    /// Stream ID of the last stream opened.
+    last_opened_id: StreamId,
+
+    /// What `DATA` frame is currently being sent in the codec.
+    in_flight_data_frame: InFlightData,
+
+    /// The maximum amount of bytes a stream should buffer.
+    max_buffer_size: usize,
+}
+
+#[derive(Debug, Eq, PartialEq)]
+enum InFlightData {
+    /// There is no `DATA` frame in flight.
+    Nothing,
+    /// There is a `DATA` frame in flight belonging to the given stream.
+    DataFrame(store::Key),
+    /// There was a `DATA` frame, but the stream's queue was since cleared.
+    Drop,
+}
+
+pub(crate) struct Prioritized<B> {
+    // The buffer
+    inner: Take<B>,
+
+    end_of_stream: bool,
+
+    // The stream that this is associated with
+    stream: store::Key,
+}
+
+// ===== impl Prioritize =====
+
+impl Prioritize {
+    pub fn new(config: &Config) -> Prioritize {
+        let mut flow = FlowControl::new();
+
+        flow.inc_window(config.remote_init_window_sz)
+            .expect("invalid initial window size");
+
+        // TODO: proper error handling
+        let _res = flow.assign_capacity(config.remote_init_window_sz);
+        debug_assert!(_res.is_ok());
+
+        tracing::trace!("Prioritize::new; flow={:?}", flow);
+
+        Prioritize {
+            pending_send: store::Queue::new(),
+            pending_capacity: store::Queue::new(),
+            pending_open: store::Queue::new(),
+            flow,
+            last_opened_id: StreamId::ZERO,
+            in_flight_data_frame: InFlightData::Nothing,
+            max_buffer_size: config.local_max_buffer_size,
+        }
+    }
+
+    pub(crate) fn max_buffer_size(&self) -> usize {
+        self.max_buffer_size
+    }
+
+    /// Queue a frame to be sent to the remote
+    pub fn queue_frame<B>(
+        &mut self,
+        frame: Frame<B>,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        task: &mut Option<Waker>,
+    ) {
+        let span = tracing::trace_span!("Prioritize::queue_frame", ?stream.id);
+        let _e = span.enter();
+        // Queue the frame in the buffer
+        stream.pending_send.push_back(buffer, frame);
+        self.schedule_send(stream, task);
+    }
+
+    pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Waker>) {
+        // If the stream is waiting to be opened, nothing more to do.
+        if stream.is_send_ready() {
+            tracing::trace!(?stream.id, "schedule_send");
+            // Queue the stream
+            self.pending_send.push(stream);
+
+            // Notify the connection.
+            if let Some(task) = task.take() {
+                task.wake();
+            }
+        }
+    }
+
+    pub fn queue_open(&mut self, stream: &mut store::Ptr) {
+        self.pending_open.push(stream);
+    }
+
+    /// Send a data frame
+    pub fn send_data<B>(
+        &mut self,
+        frame: frame::Data<B>,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+        task: &mut Option<Waker>,
+    ) -> Result<(), UserError>
+    where
+        B: Buf,
+    {
+        let sz = frame.payload().remaining();
+
+        if sz > MAX_WINDOW_SIZE as usize {
+            return Err(UserError::PayloadTooBig);
+        }
+
+        let sz = sz as WindowSize;
+
+        if !stream.state.is_send_streaming() {
+            if stream.state.is_closed() {
+                return Err(InactiveStreamId);
+            } else {
+                return Err(UnexpectedFrameType);
+            }
+        }
+
+        // Update the buffered data counter
+        stream.buffered_send_data += sz as usize;
+
+        let span =
+            tracing::trace_span!("send_data", sz, requested = stream.requested_send_capacity);
+        let _e = span.enter();
+        tracing::trace!(buffered = stream.buffered_send_data);
+
+        // Implicitly request more send capacity if not enough has been
+        // requested yet.
+        if (stream.requested_send_capacity as usize) < stream.buffered_send_data {
+            // Update the target requested capacity
+            stream.requested_send_capacity =
+                cmp::min(stream.buffered_send_data, WindowSize::MAX as usize) as WindowSize;
+
+            // `try_assign_capacity` will queue the stream to `pending_capacity` if the capacity
+            // cannot be assigned at the time it is called.
+            //
+            // Streams over the max concurrent count will still call `send_data`, so we
+            // should be careful not to put them into `pending_capacity`, as that would
+            // starve the connection capacity for other streams.
+            if !stream.is_pending_open {
+                self.try_assign_capacity(stream);
+            }
+        }
+
+        if frame.is_end_stream() {
+            stream.state.send_close();
+            self.reserve_capacity(0, stream, counts);
+        }
+
+        tracing::trace!(
+            available = %stream.send_flow.available(),
+            buffered = stream.buffered_send_data,
+        );
+
+        // The `stream.buffered_send_data == 0` check is here so that, if a zero
+        // length data frame is queued to the front (there is no previously
+        // queued data), it gets sent out immediately even if there is no
+        // available send window.
+        //
+        // Sending out zero length data frames can be done to signal
+        // end-of-stream.
+        //
+        if stream.send_flow.available() > 0 || stream.buffered_send_data == 0 {
+            // The stream currently has capacity to send the data frame, so
+            // queue it up and notify the connection task.
+            self.queue_frame(frame.into(), buffer, stream, task);
+        } else {
+            // The stream has no capacity to send the frame now, save it but
+            // don't notify the connection task. Once additional capacity
+            // becomes available, the frame will be flushed.
+            stream.pending_send.push_back(buffer, frame.into());
+        }
+
+        Ok(())
+    }
+
+    /// Request capacity to send data
+    pub fn reserve_capacity(
+        &mut self,
+        capacity: WindowSize,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+    ) {
+        let span = tracing::trace_span!(
+            "reserve_capacity",
+            ?stream.id,
+            requested = capacity,
+            effective = (capacity as usize) + stream.buffered_send_data,
+            curr = stream.requested_send_capacity
+        );
+        let _e = span.enter();
+
+        // Actual capacity is `capacity` + the current amount of buffered data.
+        // If it were less, then we could never send out the buffered data.
+        let capacity = (capacity as usize) + stream.buffered_send_data;
+
+        match capacity.cmp(&(stream.requested_send_capacity as usize)) {
+            Ordering::Equal => {
+                // Nothing to do
+            }
+            Ordering::Less => {
+                // Update the target requested capacity
+                stream.requested_send_capacity = capacity as WindowSize;
+
+                // Currently available capacity assigned to the stream
+                let available = stream.send_flow.available().as_size();
+
+                // If the stream has more assigned capacity than requested, reclaim
+                // some for the connection
+                if available as usize > capacity {
+                    let diff = available - capacity as WindowSize;
+
+                    // TODO: proper error handling
+                    let _res = stream.send_flow.claim_capacity(diff);
+                    debug_assert!(_res.is_ok());
+
+                    self.assign_connection_capacity(diff, stream, counts);
+                }
+            }
+            Ordering::Greater => {
+                // If trying to *add* capacity, but the stream send side is closed,
+                // there's nothing to be done.
+                if stream.state.is_send_closed() {
+                    return;
+                }
+
+                // Update the target requested capacity
+                stream.requested_send_capacity =
+                    cmp::min(capacity, WindowSize::MAX as usize) as WindowSize;
+
+                // Try to assign additional capacity to the stream. If none is
+                // currently available, the stream will be queued to receive some
+                // when more becomes available.
+                self.try_assign_capacity(stream);
+            }
+        }
+    }
+
+    pub fn recv_stream_window_update(
+        &mut self,
+        inc: WindowSize,
+        stream: &mut store::Ptr,
+    ) -> Result<(), Reason> {
+        let span = tracing::trace_span!(
+            "recv_stream_window_update",
+            ?stream.id,
+            ?stream.state,
+            inc,
+            flow = ?stream.send_flow
+        );
+        let _e = span.enter();
+
+        if stream.state.is_send_closed() && stream.buffered_send_data == 0 {
+            // We can't send any data, so don't bother doing anything else.
+            return Ok(());
+        }
+
+        // Update the stream level flow control.
+        stream.send_flow.inc_window(inc)?;
+
+        // If the stream is waiting on additional capacity, then this will
+        // assign it (if available on the connection) and notify the producer
+        self.try_assign_capacity(stream);
+
+        Ok(())
+    }
+
+    pub fn recv_connection_window_update(
+        &mut self,
+        inc: WindowSize,
+        store: &mut Store,
+        counts: &mut Counts,
+    ) -> Result<(), Reason> {
+        // Update the connection's window
+        self.flow.inc_window(inc)?;
+
+        self.assign_connection_capacity(inc, store, counts);
+        Ok(())
+    }
+
+    /// Reclaim all capacity assigned to the stream and re-assign it to the
+    /// connection
+    pub fn reclaim_all_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) {
+        let available = stream.send_flow.available().as_size();
+        if available > 0 {
+            // TODO: proper error handling
+            let _res = stream.send_flow.claim_capacity(available);
+            debug_assert!(_res.is_ok());
+            // Re-assign all capacity to the connection
+            self.assign_connection_capacity(available, stream, counts);
+        }
+    }
+
+    /// Reclaim just reserved capacity, not buffered capacity, and re-assign
+    /// it to the connection
+    pub fn reclaim_reserved_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) {
+        // only reclaim requested capacity that isn't already buffered
+        if stream.requested_send_capacity as usize > stream.buffered_send_data {
+            let reserved = stream.requested_send_capacity - stream.buffered_send_data as WindowSize;
+
+            // TODO: proper error handling
+            let _res = stream.send_flow.claim_capacity(reserved);
+            debug_assert!(_res.is_ok());
+            self.assign_connection_capacity(reserved, stream, counts);
+        }
+    }
+
+    pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) {
+        let span = tracing::trace_span!("clear_pending_capacity");
+        let _e = span.enter();
+        while let Some(stream) = self.pending_capacity.pop(store) {
+            counts.transition(stream, |_, stream| {
+                tracing::trace!(?stream.id, "clear_pending_capacity");
+            })
+        }
+    }
+
+    pub fn assign_connection_capacity<R>(
+        &mut self,
+        inc: WindowSize,
+        store: &mut R,
+        counts: &mut Counts,
+    ) where
+        R: Resolve,
+    {
+        let span = tracing::trace_span!("assign_connection_capacity", inc);
+        let _e = span.enter();
+
+        // TODO: proper error handling
+        let _res = self.flow.assign_capacity(inc);
+        debug_assert!(_res.is_ok());
+
+        // Assign newly acquired capacity to streams pending capacity.
+        while self.flow.available() > 0 {
+            let stream = match self.pending_capacity.pop(store) {
+                Some(stream) => stream,
+                None => return,
+            };
+
+            // Streams pending capacity may have been reset before capacity
+            // became available. In that case, the stream won't want any
+            // capacity, and so we shouldn't "transition" on it, but just evict
+            // it and continue the loop.
+            if !(stream.state.is_send_streaming() || stream.buffered_send_data > 0) {
+                continue;
+            }
+
+            counts.transition(stream, |_, stream| {
+                // Try to assign capacity to the stream. This will also re-queue the
+                // stream if there isn't enough connection level capacity to fulfill
+                // the capacity request.
+                self.try_assign_capacity(stream);
+            })
+        }
+    }
+
+    /// Request capacity to send data
+    fn try_assign_capacity(&mut self, stream: &mut store::Ptr) {
+        let total_requested = stream.requested_send_capacity;
+
+        // Total requested should never go below actual assigned
+        // (Note: the window size can go lower than assigned)
+        debug_assert!(stream.send_flow.available() <= total_requested as usize);
+
+        // The amount of additional capacity that the stream requests.
+        // Don't assign more than the window has available!
+        let additional = cmp::min(
+            total_requested - stream.send_flow.available().as_size(),
+            // Can't assign more than what is available
+            stream.send_flow.window_size() - stream.send_flow.available().as_size(),
+        );
+        let span = tracing::trace_span!("try_assign_capacity", ?stream.id);
+        let _e = span.enter();
+        tracing::trace!(
+            requested = total_requested,
+            additional,
+            buffered = stream.buffered_send_data,
+            window = stream.send_flow.window_size(),
+            conn = %self.flow.available()
+        );
+
+        if additional == 0 {
+            // Nothing more to do
+            return;
+        }
+
+        // If the stream has requested capacity, then it must be in the
+        // streaming state (more data could be sent) or there is buffered data
+        // waiting to be sent.
+        debug_assert!(
+            stream.state.is_send_streaming() || stream.buffered_send_data > 0,
+            "state={:?}",
+            stream.state
+        );
+
+        // The amount of currently available capacity on the connection
+        let conn_available = self.flow.available().as_size();
+
+        // First check if capacity is immediately available
+        if conn_available > 0 {
+            // The amount of capacity to assign to the stream
+            // TODO: Should prioritization factor into this?
+            let assign = cmp::min(conn_available, additional);
+
+            tracing::trace!(capacity = assign, "assigning");
+
+            // Assign the capacity to the stream
+            stream.assign_capacity(assign, self.max_buffer_size);
+
+            // Claim the capacity from the connection
+            // TODO: proper error handling
+            let _res = self.flow.claim_capacity(assign);
+            debug_assert!(_res.is_ok());
+        }
+
+        tracing::trace!(
+            available = %stream.send_flow.available(),
+            requested = stream.requested_send_capacity,
+            buffered = stream.buffered_send_data,
+            has_unavailable = %stream.send_flow.has_unavailable()
+        );
+
+        if stream.send_flow.available() < stream.requested_send_capacity as usize
+            && stream.send_flow.has_unavailable()
+        {
+            // The stream requires additional capacity and the stream's
+            // window has available capacity, but the connection window
+            // does not.
+            //
+            // In this case, the stream needs to be queued up for when the
+            // connection has more capacity.
+            self.pending_capacity.push(stream);
+        }
+
+        // If data is buffered and the stream is send ready, then
+        // schedule the stream for execution
+        if stream.buffered_send_data > 0 && stream.is_send_ready() {
+            // TODO: This assertion isn't *exactly* correct. There can still be
+            // buffered send data while the stream's pending send queue is
+            // empty. This can happen when a large data frame is in the process
+            // of being **partially** sent. Once the window has been sent, the
+            // data frame will be returned to the prioritization layer to be
+            // re-scheduled.
+            //
+            // That said, it would be nice to figure out how to make this
+            // assertion correctly.
+            //
+            // debug_assert!(!stream.pending_send.is_empty());
+
+            self.pending_send.push(stream);
+        }
+    }
+
+    pub fn poll_complete<T, B>(
+        &mut self,
+        cx: &mut Context,
+        buffer: &mut Buffer<Frame<B>>,
+        store: &mut Store,
+        counts: &mut Counts,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        // Ensure codec is ready
+        ready!(dst.poll_ready(cx))?;
+
+        // Reclaim any frame that has previously been written
+        self.reclaim_frame(buffer, store, dst);
+
+        // The max frame length
+        let max_frame_len = dst.max_send_frame_size();
+
+        tracing::trace!("poll_complete");
+
+        loop {
+            if let Some(mut stream) = self.pop_pending_open(store, counts) {
+                self.pending_send.push_front(&mut stream);
+                self.try_assign_capacity(&mut stream);
+            }
+
+            match self.pop_frame(buffer, store, max_frame_len, counts) {
+                Some(frame) => {
+                    tracing::trace!(?frame, "writing");
+
+                    debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing);
+                    if let Frame::Data(ref frame) = frame {
+                        self.in_flight_data_frame = InFlightData::DataFrame(frame.payload().stream);
+                    }
+                    dst.buffer(frame).expect("invalid frame");
+
+                    // Ensure the codec is ready to try the loop again.
+                    ready!(dst.poll_ready(cx))?;
+
+                    // Because, always try to reclaim...
+                    self.reclaim_frame(buffer, store, dst);
+                }
+                None => {
+                    // Try to flush the codec.
+                    ready!(dst.flush(cx))?;
+
+                    // This might release a data frame...
+                    if !self.reclaim_frame(buffer, store, dst) {
+                        return Poll::Ready(Ok(()));
+                    }
+
+                    // No need to poll ready as poll_complete() does this for
+                    // us...
+                }
+            }
+        }
+    }
+
+    /// Tries to reclaim a pending data frame from the codec.
+    ///
+    /// Returns true if a frame was reclaimed.
+    ///
+    /// When a data frame is written to the codec, it may not be written in its
+    /// entirety (large chunks are split up into potentially many data frames).
+    /// In this case, the stream needs to be reprioritized.
+    fn reclaim_frame<T, B>(
+        &mut self,
+        buffer: &mut Buffer<Frame<B>>,
+        store: &mut Store,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> bool
+    where
+        B: Buf,
+    {
+        let span = tracing::trace_span!("try_reclaim_frame");
+        let _e = span.enter();
+
+        // First check if there are any data chunks to take back
+        if let Some(frame) = dst.take_last_data_frame() {
+            self.reclaim_frame_inner(buffer, store, frame)
+        } else {
+            false
+        }
+    }
+
+    fn reclaim_frame_inner<B>(
+        &mut self,
+        buffer: &mut Buffer<Frame<B>>,
+        store: &mut Store,
+        frame: frame::Data<Prioritized<B>>,
+    ) -> bool
+    where
+        B: Buf,
+    {
+        tracing::trace!(
+            ?frame,
+            sz = frame.payload().inner.get_ref().remaining(),
+            "reclaimed"
+        );
+
+        let mut eos = false;
+        let key = frame.payload().stream;
+
+        match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) {
+            InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"),
+            InFlightData::Drop => {
+                tracing::trace!("not reclaiming frame for cancelled stream");
+                return false;
+            }
+            InFlightData::DataFrame(k) => {
+                debug_assert_eq!(k, key);
+            }
+        }
+
+        let mut frame = frame.map(|prioritized| {
+            // TODO: Ensure fully written
+            eos = prioritized.end_of_stream;
+            prioritized.inner.into_inner()
+        });
+
+        if frame.payload().has_remaining() {
+            let mut stream = store.resolve(key);
+
+            if eos {
+                frame.set_end_stream(true);
+            }
+
+            self.push_back_frame(frame.into(), buffer, &mut stream);
+
+            return true;
+        }
+
+        false
+    }
+
+    /// Push the frame to the front of the stream's deque, scheduling the
+    /// stream if needed.
+    fn push_back_frame<B>(
+        &mut self,
+        frame: Frame<B>,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+    ) {
+        // Push the frame to the front of the stream's deque
+        stream.pending_send.push_front(buffer, frame);
+
+        // If needed, schedule the sender
+        if stream.send_flow.available() > 0 {
+            debug_assert!(!stream.pending_send.is_empty());
+            self.pending_send.push(stream);
+        }
+    }
+
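+    /// Drops every frame queued for sending on the given stream and resets
+    /// its buffered send capacity; an in-flight frame belonging to the stream
+    /// is marked so it will not be reclaimed.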
+    pub fn clear_queue<B>(&mut self, buffer: &mut Buffer<Frame<B>>, stream: &mut store::Ptr) {
+        let span = tracing::trace_span!("clear_queue", ?stream.id);
+        let _e = span.enter();
+
+        // TODO: make this more efficient?
+        while let Some(frame) = stream.pending_send.pop_front(buffer) {
+            tracing::trace!(?frame, "dropping");
+        }
+
+        stream.buffered_send_data = 0;
+        stream.requested_send_capacity = 0;
+        if let InFlightData::DataFrame(key) = self.in_flight_data_frame {
+            if stream.key() == key {
+                // This stream could get cleaned up now - don't allow the buffered frame to get reclaimed.
+                self.in_flight_data_frame = InFlightData::Drop;
+            }
+        }
+    }
+
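+    /// Drains the `pending_send` queue, letting `counts` perform any
+    /// resulting stream-state transitions.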
+    pub fn clear_pending_send(&mut self, store: &mut Store, counts: &mut Counts) {
+        while let Some(stream) = self.pending_send.pop(store) {
+            let is_pending_reset = stream.is_pending_reset_expiration();
+            counts.transition_after(stream, is_pending_reset);
+        }
+    }
+
+    pub fn clear_pending_open(&mut self, store: &mut Store, counts: &mut Counts) {
+        while let Some(stream) = self.pending_open.pop(store) {
+            let is_pending_reset = stream.is_pending_reset_expiration();
+            counts.transition_after(stream, is_pending_reset);
+        }
+    }
+
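+    /// Pops the next frame to write for the stream at the front of the
+    /// `pending_send` queue, splitting DATA frames so they fit within
+    /// `max_len` and the available flow-control windows.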
+    fn pop_frame<B>(
+        &mut self,
+        buffer: &mut Buffer<Frame<B>>,
+        store: &mut Store,
+        max_len: usize,
+        counts: &mut Counts,
+    ) -> Option<Frame<Prioritized<B>>>
+    where
+        B: Buf,
+    {
+        let span = tracing::trace_span!("pop_frame");
+        let _e = span.enter();
+
+        loop {
+            match self.pending_send.pop(store) {
+                Some(mut stream) => {
+                    let span = tracing::trace_span!("popped", ?stream.id, ?stream.state);
+                    let _e = span.enter();
+
+                    // It's possible that this stream, besides having data to send,
+                    // is also queued to send a reset, and thus is already in the queue
+                    // to wait for "some time" after a reset.
+                    //
+                    // To be safe, we just always ask the stream.
+                    let is_pending_reset = stream.is_pending_reset_expiration();
+
+                    tracing::trace!(is_pending_reset);
+
+                    let frame = match stream.pending_send.pop_front(buffer) {
+                        Some(Frame::Data(mut frame)) => {
+                            // Get the amount of capacity remaining for stream's
+                            // window.
+                            let stream_capacity = stream.send_flow.available();
+                            let sz = frame.payload().remaining();
+
+                            tracing::trace!(
+                                sz,
+                                eos = frame.is_end_stream(),
+                                window = %stream_capacity,
+                                available = %stream.send_flow.available(),
+                                requested = stream.requested_send_capacity,
+                                buffered = stream.buffered_send_data,
+                                "data frame"
+                            );
+
+                            // Zero length data frames always have capacity to
+                            // be sent.
+                            if sz > 0 && stream_capacity == 0 {
+                                tracing::trace!("stream capacity is 0");
+
+                                // Ensure that the stream is waiting for
+                                // connection level capacity
+                                //
+                                // TODO: uncomment
+                                // debug_assert!(stream.is_pending_send_capacity);
+
+                                // The stream has no more capacity, this can
+                                // happen if the remote reduced the stream
+                                // window. In this case, we need to buffer the
+                                // frame and wait for a window update...
+                                stream.pending_send.push_front(buffer, frame.into());
+
+                                continue;
+                            }
+
+                            // Only send up to the max frame length
+                            let len = cmp::min(sz, max_len);
+
+                            // Only send up to the stream's window capacity
+                            let len =
+                                cmp::min(len, stream_capacity.as_size() as usize) as WindowSize;
+
+                            // There *must* be enough connection level
+                            // capacity at this point.
+                            debug_assert!(len <= self.flow.window_size());
+
+                            // Check if the stream level window the peer knows is available. In some
+                            // scenarios, maybe the window we know is available but the window which
+                            // peer knows is not.
+                            if len > 0 && len > stream.send_flow.window_size() {
+                                stream.pending_send.push_front(buffer, frame.into());
+                                continue;
+                            }
+
+                            tracing::trace!(len, "sending data frame");
+
+                            // Update the flow control
+                            tracing::trace_span!("updating stream flow").in_scope(|| {
+                                stream.send_data(len, self.max_buffer_size);
+
+                                // Assign the capacity back to the connection that
+                                // was just consumed from the stream in the previous
+                                // line.
+                                // TODO: proper error handling
+                                let _res = self.flow.assign_capacity(len);
+                                debug_assert!(_res.is_ok());
+                            });
+
+                            let (eos, len) = tracing::trace_span!("updating connection flow")
+                                .in_scope(|| {
+                                    // TODO: proper error handling
+                                    let _res = self.flow.send_data(len);
+                                    debug_assert!(_res.is_ok());
+
+                                    // Wrap the frame's data payload to ensure that the
+                                    // correct amount of data gets written.
+
+                                    let eos = frame.is_end_stream();
+                                    let len = len as usize;
+
+                                    if frame.payload().remaining() > len {
+                                        frame.set_end_stream(false);
+                                    }
+                                    (eos, len)
+                                });
+
+                            Frame::Data(frame.map(|buf| Prioritized {
+                                inner: buf.take(len),
+                                end_of_stream: eos,
+                                stream: stream.key(),
+                            }))
+                        }
+                        Some(Frame::PushPromise(pp)) => {
+                            let mut pushed =
+                                stream.store_mut().find_mut(&pp.promised_id()).unwrap();
+                            pushed.is_pending_push = false;
+                            // Transition stream from pending_push to pending_open
+                            // if possible
+                            if !pushed.pending_send.is_empty() {
+                                if counts.can_inc_num_send_streams() {
+                                    counts.inc_num_send_streams(&mut pushed);
+                                    self.pending_send.push(&mut pushed);
+                                } else {
+                                    self.queue_open(&mut pushed);
+                                }
+                            }
+                            Frame::PushPromise(pp)
+                        }
+                        Some(frame) => frame.map(|_| {
+                            unreachable!(
+                                "Frame::map closure will only be called \
+                                 on DATA frames."
+                            )
+                        }),
+                        None => {
+                            if let Some(reason) = stream.state.get_scheduled_reset() {
+                                let stream_id = stream.id;
+                                stream
+                                    .state
+                                    .set_reset(stream_id, reason, Initiator::Library);
+
+                                let frame = frame::Reset::new(stream.id, reason);
+                                Frame::Reset(frame)
+                            } else {
+                                // If the stream receives a RESET from the peer, it may have
+                                // had data buffered to be sent, but all the frames are cleared
+                                // in clear_queue(). Instead of doing an O(N) traversal of the
+                                // queue to remove it, let's just ignore the stream here.
+                                tracing::trace!("removing dangling stream from pending_send");
+                                // Since this should only happen as a consequence of `clear_queue`,
+                                // we must be in a closed state of some kind.
+                                debug_assert!(stream.state.is_closed());
+                                counts.transition_after(stream, is_pending_reset);
+                                continue;
+                            }
+                        }
+                    };
+
+                    tracing::trace!("pop_frame; frame={:?}", frame);
+
+                    if cfg!(debug_assertions) && stream.state.is_idle() {
+                        debug_assert!(stream.id > self.last_opened_id);
+                        self.last_opened_id = stream.id;
+                    }
+
+                    if !stream.pending_send.is_empty() || stream.state.is_scheduled_reset() {
+                        // TODO: Only requeue the sender IF it is ready to send
+                        // the next frame. i.e. don't requeue it if the next
+                        // frame is a data frame and the stream does not have
+                        // any more capacity.
+                        self.pending_send.push(&mut stream);
+                    }
+
+                    counts.transition_after(stream, is_pending_reset);
+
+                    return Some(frame);
+                }
+                None => return None,
+            }
+        }
+    }
+
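+    /// Pops the next stream waiting to open, provided the peer's concurrency
+    /// limit allows another locally-initiated stream.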
+    fn pop_pending_open<'s>(
+        &mut self,
+        store: &'s mut Store,
+        counts: &mut Counts,
+    ) -> Option<store::Ptr<'s>> {
+        tracing::trace!("schedule_pending_open");
+        // check for any pending open streams
+        if counts.can_inc_num_send_streams() {
+            if let Some(mut stream) = self.pending_open.pop(store) {
+                tracing::trace!("schedule_pending_open; stream={:?}", stream.id);
+
+                counts.inc_num_send_streams(&mut stream);
+                stream.notify_send();
+                return Some(stream);
+            }
+        }
+
+        None
+    }
+}
+
+// ===== impl Prioritized =====
+
+impl<B> Buf for Prioritized<B>
+where
+    B: Buf,
+{
+    fn remaining(&self) -> usize {
+        self.inner.remaining()
+    }
+
+    fn chunk(&self) -> &[u8] {
+        self.inner.chunk()
+    }
+
+    fn chunks_vectored<'a>(&'a self, dst: &mut [std::io::IoSlice<'a>]) -> usize {
+        self.inner.chunks_vectored(dst)
+    }
+
+    fn advance(&mut self, cnt: usize) {
+        self.inner.advance(cnt)
+    }
+}
+
+impl<B: Buf> fmt::Debug for Prioritized<B> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Prioritized")
+            .field("remaining", &self.inner.get_ref().remaining())
+            .field("end_of_stream", &self.end_of_stream)
+            .field("stream", &self.stream)
+            .finish()
+    }
+}
diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs
new file mode 100644
index 0000000..46cb87c
--- /dev/null
+++ b/src/proto/streams/recv.rs
@@ -0,0 +1,1166 @@
+use super::*;
+use crate::codec::UserError;
+use crate::frame::{PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE};
+use crate::proto;
+
+use http::{HeaderMap, Request, Response};
+
+use std::cmp::Ordering;
+use std::io;
+use std::task::{Context, Poll, Waker};
+use std::time::Instant;
+
+#[derive(Debug)]
+pub(super) struct Recv {
+    /// Initial window size of remote initiated streams
+    init_window_sz: WindowSize,
+
+    /// Connection level flow control governing received data
+    flow: FlowControl,
+
+    /// Amount of connection window capacity currently used by outstanding streams.
+    in_flight_data: WindowSize,
+
+    /// The lowest stream ID that is still idle
+    next_stream_id: Result<StreamId, StreamIdOverflow>,
+
+    /// The stream ID of the last processed stream
+    last_processed_id: StreamId,
+
+    /// Any streams with a higher ID are ignored.
+    ///
+    /// This starts as MAX, but is lowered when a GOAWAY is received.
+    ///
+    /// > After sending a GOAWAY frame, the sender can discard frames for
+    /// > streams initiated by the receiver with identifiers higher than
+    /// > the identified last stream.
+    max_stream_id: StreamId,
+
+    /// Streams that have pending window updates
+    pending_window_updates: store::Queue<stream::NextWindowUpdate>,
+
+    /// New streams to be accepted
+    pending_accept: store::Queue<stream::NextAccept>,
+
+    /// Locally reset streams that should be reaped when they expire
+    pending_reset_expired: store::Queue<stream::NextResetExpire>,
+
+    /// How long locally reset streams should ignore received frames
+    reset_duration: Duration,
+
+    /// Holds frames that are waiting to be read
+    buffer: Buffer<Event>,
+
+    /// The ID of a refused stream; a RST_STREAM(REFUSED_STREAM) frame still
+    /// needs to be sent for it.
+    refused: Option<StreamId>,
+
+    /// If push promises are allowed to be received.
+    is_push_enabled: bool,
+
+    /// If extended connect protocol is enabled.
+    is_extended_connect_protocol_enabled: bool,
+}
+
+#[derive(Debug)]
+pub(super) enum Event {
+    Headers(peer::PollMessage),
+    Data(Bytes),
+    Trailers(HeaderMap),
+}
+
+#[derive(Debug)]
+pub(super) enum RecvHeaderBlockError<T> {
+    Oversize(T),
+    State(Error),
+}
+
+#[derive(Debug)]
+pub(crate) enum Open {
+    PushPromise,
+    Headers,
+}
+
+impl Recv {
+    pub fn new(peer: peer::Dyn, config: &Config) -> Self {
+        let next_stream_id = if peer.is_server() { 1 } else { 2 };
+
+        let mut flow = FlowControl::new();
+
+        // connections always have the default window size, regardless of
+        // settings
+        flow.inc_window(DEFAULT_INITIAL_WINDOW_SIZE)
+            .expect("invalid initial remote window size");
+        flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE).unwrap();
+
+        Recv {
+            init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE,
+            flow,
+            in_flight_data: 0 as WindowSize,
+            next_stream_id: Ok(next_stream_id.into()),
+            pending_window_updates: store::Queue::new(),
+            last_processed_id: StreamId::ZERO,
+            max_stream_id: StreamId::MAX,
+            pending_accept: store::Queue::new(),
+            pending_reset_expired: store::Queue::new(),
+            reset_duration: config.local_reset_duration,
+            buffer: Buffer::new(),
+            refused: None,
+            is_push_enabled: config.local_push_enabled,
+            is_extended_connect_protocol_enabled: config.extended_connect_protocol_enabled,
+        }
+    }
+
+    /// Returns the initial receive window size
+    pub fn init_window_sz(&self) -> WindowSize {
+        self.init_window_sz
+    }
+
+    /// Returns the ID of the last processed stream
+    pub fn last_processed_id(&self) -> StreamId {
+        self.last_processed_id
+    }
+
+    /// Update state reflecting a new, remotely opened stream
+    ///
+    /// Returns the stream ID if the stream is accepted, or `None` if it was refused.
+    pub fn open(
+        &mut self,
+        id: StreamId,
+        mode: Open,
+        counts: &mut Counts,
+    ) -> Result<Option<StreamId>, Error> {
+        assert!(self.refused.is_none());
+
+        counts.peer().ensure_can_open(id, mode)?;
+
+        let next_id = self.next_stream_id()?;
+        if id < next_id {
+            proto_err!(conn: "id ({:?}) < next_id ({:?})", id, next_id);
+            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+        }
+
+        self.next_stream_id = id.next_id();
+
+        if !counts.can_inc_num_recv_streams() {
+            self.refused = Some(id);
+            return Ok(None);
+        }
+
+        Ok(Some(id))
+    }
+
+    /// Transition the stream state based on receiving headers
+    ///
+    /// The caller ensures that the frame represents headers and not trailers.
+    pub fn recv_headers(
+        &mut self,
+        frame: frame::Headers,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+    ) -> Result<(), RecvHeaderBlockError<Option<frame::Headers>>> {
+        tracing::trace!("opening stream; init_window={}", self.init_window_sz);
+        let is_initial = stream.state.recv_open(&frame)?;
+
+        if is_initial {
+            // TODO: be smarter about this logic
+            if frame.stream_id() > self.last_processed_id {
+                self.last_processed_id = frame.stream_id();
+            }
+
+            // Increment the number of concurrent streams
+            counts.inc_num_recv_streams(stream);
+        }
+
+        if !stream.content_length.is_head() {
+            use super::stream::ContentLength;
+            use http::header;
+
+            if let Some(content_length) = frame.fields().get(header::CONTENT_LENGTH) {
+                let content_length = match frame::parse_u64(content_length.as_bytes()) {
+                    Ok(v) => v,
+                    Err(_) => {
+                        proto_err!(stream: "could not parse content-length; stream={:?}", stream.id);
+                        return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into());
+                    }
+                };
+
+                stream.content_length = ContentLength::Remaining(content_length);
+            }
+        }
+
+        if frame.is_over_size() {
+            // A frame is over size if the decoded header block was bigger than
+            // SETTINGS_MAX_HEADER_LIST_SIZE.
+            //
+            // > A server that receives a larger header block than it is willing
+            // > to handle can send an HTTP 431 (Request Header Fields Too
+            // > Large) status code [RFC6585]. A client can discard responses
+            // > that it cannot process.
+            //
+            // So, if peer is a server, we'll send a 431. In either case,
+            // an error is recorded, which will send a REFUSED_STREAM,
+            // since we don't want any of the data frames either.
+            tracing::debug!(
+                "stream error REQUEST_HEADER_FIELDS_TOO_LARGE -- \
+                 recv_headers: frame is over size; stream={:?}",
+                stream.id
+            );
+            return if counts.peer().is_server() && is_initial {
+                let mut res = frame::Headers::new(
+                    stream.id,
+                    frame::Pseudo::response(::http::StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE),
+                    HeaderMap::new(),
+                );
+                res.set_end_stream();
+                Err(RecvHeaderBlockError::Oversize(Some(res)))
+            } else {
+                Err(RecvHeaderBlockError::Oversize(None))
+            };
+        }
+
+        let stream_id = frame.stream_id();
+        let (pseudo, fields) = frame.into_parts();
+
+        if pseudo.protocol.is_some()
+            && counts.peer().is_server()
+            && !self.is_extended_connect_protocol_enabled
+        {
+            proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id);
+            return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into());
+        }
+
+        if pseudo.status.is_some() && counts.peer().is_server() {
+            proto_err!(stream: "cannot use :status header for requests; stream={:?}", stream.id);
+            return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into());
+        }
+
+        if !pseudo.is_informational() {
+            let message = counts
+                .peer()
+                .convert_poll_message(pseudo, fields, stream_id)?;
+
+            // Push the frame onto the stream's recv buffer
+            stream
+                .pending_recv
+                .push_back(&mut self.buffer, Event::Headers(message));
+            stream.notify_recv();
+
+            // Only servers can receive a headers frame that initiates the stream.
+            // This is verified in `Streams` before calling this function.
+            if counts.peer().is_server() {
+                // Correctness: never push a stream to `pending_accept` without having the
+                // corresponding headers frame pushed to `stream.pending_recv`.
+                self.pending_accept.push(stream);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Called by the server to get the request
+    ///
+    /// # Panics
+    ///
+    /// Panics if `stream.pending_recv` has no `Event::Headers` queued.
+    ///
+    pub fn take_request(&mut self, stream: &mut store::Ptr) -> Request<()> {
+        use super::peer::PollMessage::*;
+
+        match stream.pending_recv.pop_front(&mut self.buffer) {
+            Some(Event::Headers(Server(request))) => request,
+            _ => unreachable!("server stream queue must start with Headers"),
+        }
+    }
+
+    /// Called by the client to get pushed response
+    pub fn poll_pushed(
+        &mut self,
+        cx: &Context,
+        stream: &mut store::Ptr,
+    ) -> Poll<Option<Result<(Request<()>, store::Key), proto::Error>>> {
+        use super::peer::PollMessage::*;
+
+        let mut ppp = stream.pending_push_promises.take();
+        let pushed = ppp.pop(stream.store_mut()).map(|mut pushed| {
+            match pushed.pending_recv.pop_front(&mut self.buffer) {
+                Some(Event::Headers(Server(headers))) => (headers, pushed.key()),
+                // When frames are pushed into the queue, it is verified that
+                // the first frame is a HEADERS frame.
+                _ => panic!("Headers not set on pushed stream"),
+            }
+        });
+        stream.pending_push_promises = ppp;
+        if let Some(p) = pushed {
+            Poll::Ready(Some(Ok(p)))
+        } else {
+            let is_open = stream.state.ensure_recv_open()?;
+
+            if is_open {
+                stream.recv_task = Some(cx.waker().clone());
+                Poll::Pending
+            } else {
+                Poll::Ready(None)
+            }
+        }
+    }
+
+    /// Called by the client to get the response
+    pub fn poll_response(
+        &mut self,
+        cx: &Context,
+        stream: &mut store::Ptr,
+    ) -> Poll<Result<Response<()>, proto::Error>> {
+        use super::peer::PollMessage::*;
+
+        // If the buffer is not empty, then the first frame must be a HEADERS
+        // frame or the user violated the contract.
+        match stream.pending_recv.pop_front(&mut self.buffer) {
+            Some(Event::Headers(Client(response))) => Poll::Ready(Ok(response)),
+            Some(_) => panic!("poll_response called after response returned"),
+            None => {
+                if !stream.state.ensure_recv_open()? {
+                    proto_err!(stream: "poll_response: stream={:?} is not opened;",  stream.id);
+                    return Poll::Ready(Err(Error::library_reset(
+                        stream.id,
+                        Reason::PROTOCOL_ERROR,
+                    )));
+                }
+
+                stream.recv_task = Some(cx.waker().clone());
+                Poll::Pending
+            }
+        }
+    }
+
+    /// Transition the stream based on receiving trailers
+    pub fn recv_trailers(
+        &mut self,
+        frame: frame::Headers,
+        stream: &mut store::Ptr,
+    ) -> Result<(), Error> {
+        // Transition the state
+        stream.state.recv_close()?;
+
+        if stream.ensure_content_length_zero().is_err() {
+            proto_err!(stream: "recv_trailers: content-length is not zero; stream={:?};",  stream.id);
+            return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR));
+        }
+
+        let trailers = frame.into_fields();
+
+        // Push the frame onto the stream's recv buffer
+        stream
+            .pending_recv
+            .push_back(&mut self.buffer, Event::Trailers(trailers));
+        stream.notify_recv();
+
+        Ok(())
+    }
+
+    /// Releases capacity of the connection
+    pub fn release_connection_capacity(&mut self, capacity: WindowSize, task: &mut Option<Waker>) {
+        tracing::trace!(
+            "release_connection_capacity; size={}, connection in_flight_data={}",
+            capacity,
+            self.in_flight_data,
+        );
+
+        // Decrement in-flight data
+        self.in_flight_data -= capacity;
+
+        // Assign capacity to connection
+        // TODO: proper error handling
+        let _res = self.flow.assign_capacity(capacity);
+        debug_assert!(_res.is_ok());
+
+        if self.flow.unclaimed_capacity().is_some() {
+            if let Some(task) = task.take() {
+                task.wake();
+            }
+        }
+    }
+
+    /// Releases capacity back to the connection & stream
+    pub fn release_capacity(
+        &mut self,
+        capacity: WindowSize,
+        stream: &mut store::Ptr,
+        task: &mut Option<Waker>,
+    ) -> Result<(), UserError> {
+        tracing::trace!("release_capacity; size={}", capacity);
+
+        if capacity > stream.in_flight_recv_data {
+            return Err(UserError::ReleaseCapacityTooBig);
+        }
+
+        self.release_connection_capacity(capacity, task);
+
+        // Decrement in-flight data
+        stream.in_flight_recv_data -= capacity;
+
+        // Assign capacity to stream
+        // TODO: proper error handling
+        let _res = stream.recv_flow.assign_capacity(capacity);
+        debug_assert!(_res.is_ok());
+
+        if stream.recv_flow.unclaimed_capacity().is_some() {
+            // Queue the stream for sending the WINDOW_UPDATE frame.
+            self.pending_window_updates.push(stream);
+
+            if let Some(task) = task.take() {
+                task.wake();
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Release any unclaimed capacity for a closed stream.
+    pub fn release_closed_capacity(&mut self, stream: &mut store::Ptr, task: &mut Option<Waker>) {
+        debug_assert_eq!(stream.ref_count, 0);
+
+        if stream.in_flight_recv_data == 0 {
+            return;
+        }
+
+        tracing::trace!(
+            "auto-release closed stream ({:?}) capacity: {:?}",
+            stream.id,
+            stream.in_flight_recv_data,
+        );
+
+        self.release_connection_capacity(stream.in_flight_recv_data, task);
+        stream.in_flight_recv_data = 0;
+
+        self.clear_recv_buffer(stream);
+    }
+
+    /// Set the "target" connection window size.
+    ///
+    /// By default, all new connections start with 64 KB of window size. As
+    /// streams use and release capacity, we will send WINDOW_UPDATEs for the
+    /// connection to bring it back up to the initial "target".
+    ///
+    /// Setting a target means that we will try to tell the peer about
+    /// WINDOW_UPDATEs so the peer knows it has about `target` window to use
+    /// for the whole connection.
+    ///
+    /// The `task` is an optional parked task for the `Connection` that might
+    /// be blocked on needing more window capacity.
+    pub fn set_target_connection_window(
+        &mut self,
+        target: WindowSize,
+        task: &mut Option<Waker>,
+    ) -> Result<(), Reason> {
+        tracing::trace!(
+            "set_target_connection_window; target={}; available={}, reserved={}",
+            target,
+            self.flow.available(),
+            self.in_flight_data,
+        );
+
+        // The current target connection window is our `available` plus any
+        // in-flight data reserved by streams.
+        //
+        // Update the flow controller with the difference between the new
+        // target and the current target.
+        let current = self
+            .flow
+            .available()
+            .add(self.in_flight_data)?
+            .checked_size();
+        if target > current {
+            self.flow.assign_capacity(target - current)?;
+        } else {
+            self.flow.claim_capacity(current - target)?;
+        }
+
+        // If changing the target capacity means we gained a bunch of capacity,
+        // enough that we went over the update threshold, then schedule sending
+        // a connection WINDOW_UPDATE.
+        if self.flow.unclaimed_capacity().is_some() {
+            if let Some(task) = task.take() {
+                task.wake();
+            }
+        }
+        Ok(())
+    }
+
+    pub(crate) fn apply_local_settings(
+        &mut self,
+        settings: &frame::Settings,
+        store: &mut Store,
+    ) -> Result<(), proto::Error> {
+        if let Some(val) = settings.is_extended_connect_protocol_enabled() {
+            self.is_extended_connect_protocol_enabled = val;
+        }
+
+        if let Some(target) = settings.initial_window_size() {
+            let old_sz = self.init_window_sz;
+            self.init_window_sz = target;
+
+            tracing::trace!("update_initial_window_size; new={}; old={}", target, old_sz,);
+
+            // Per RFC 7540 §6.9.2:
+            //
+            // In addition to changing the flow-control window for streams that are
+            // not yet active, a SETTINGS frame can alter the initial flow-control
+            // window size for streams with active flow-control windows (that is,
+            // streams in the "open" or "half-closed (remote)" state). When the
+            // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust
+            // the size of all stream flow-control windows that it maintains by the
+            // difference between the new value and the old value.
+            //
+            // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available
+            // space in a flow-control window to become negative. A sender MUST
+            // track the negative flow-control window and MUST NOT send new
+            // flow-controlled frames until it receives WINDOW_UPDATE frames that
+            // cause the flow-control window to become positive.
+
+            match target.cmp(&old_sz) {
+                Ordering::Less => {
+                    // We must decrease the (local) window on every open stream.
+                    let dec = old_sz - target;
+                    tracing::trace!("decrementing all windows; dec={}", dec);
+
+                    store.try_for_each(|mut stream| {
+                        stream
+                            .recv_flow
+                            .dec_recv_window(dec)
+                            .map_err(proto::Error::library_go_away)?;
+                        Ok::<_, proto::Error>(())
+                    })?;
+                }
+                Ordering::Greater => {
+                    // We must increase the (local) window on every open stream.
+                    let inc = target - old_sz;
+                    tracing::trace!("incrementing all windows; inc={}", inc);
+                    store.try_for_each(|mut stream| {
+                        // XXX: Shouldn't the peer have already noticed our
+                        // overflow and sent us a GOAWAY?
+                        stream
+                            .recv_flow
+                            .inc_window(inc)
+                            .map_err(proto::Error::library_go_away)?;
+                        stream
+                            .recv_flow
+                            .assign_capacity(inc)
+                            .map_err(proto::Error::library_go_away)?;
+                        Ok::<_, proto::Error>(())
+                    })?;
+                }
+                Ordering::Equal => (),
+            }
+        }
+
+        Ok(())
+    }
+
+    pub fn is_end_stream(&self, stream: &store::Ptr) -> bool {
+        if !stream.state.is_recv_closed() {
+            return false;
+        }
+
+        stream.pending_recv.is_empty()
+    }
+
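+    /// Handles a received DATA frame: validates the stream state, consumes
+    /// connection and stream flow-control windows, tracks content-length, and
+    /// queues the payload for the user to read.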
+    pub fn recv_data(&mut self, frame: frame::Data, stream: &mut store::Ptr) -> Result<(), Error> {
+        let sz = frame.payload().len();
+
+        // This should have been enforced at the codec::FramedRead layer, so
+        // this is just a sanity check.
+        assert!(sz <= MAX_WINDOW_SIZE as usize);
+
+        let sz = sz as WindowSize;
+
+        let is_ignoring_frame = stream.state.is_local_error();
+
+        if !is_ignoring_frame && !stream.state.is_recv_streaming() {
+            // TODO: There are cases where this can be a stream error of
+            // STREAM_CLOSED instead...
+
+            // Receiving a DATA frame when not expecting one is a protocol
+            // error.
+            proto_err!(conn: "unexpected DATA frame; stream={:?}", stream.id);
+            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+        }
+
+        tracing::trace!(
+            "recv_data; size={}; connection={}; stream={}",
+            sz,
+            self.flow.window_size(),
+            stream.recv_flow.window_size()
+        );
+
+        if is_ignoring_frame {
+            tracing::trace!(
+                "recv_data; frame ignored on locally reset {:?} for some time",
+                stream.id,
+            );
+            return self.ignore_data(sz);
+        }
+
+                    // Buffering may have split the frame, so always try to
+                    // reclaim any remainder.
+        // on the stream.
+        self.consume_connection_window(sz)?;
+
+        if stream.recv_flow.window_size() < sz {
+            // http://httpwg.org/specs/rfc7540.html#WINDOW_UPDATE
+            // > A receiver MAY respond with a stream error (Section 5.4.2) or
+            // > connection error (Section 5.4.1) of type FLOW_CONTROL_ERROR if
+            // > it is unable to accept a frame.
+            //
+            // So, for violating the **stream** window, we can send either a
+            // stream or connection error. We've opted to send a stream
+            // error.
+            return Err(Error::library_reset(stream.id, Reason::FLOW_CONTROL_ERROR));
+        }
+
+        if stream.dec_content_length(frame.payload().len()).is_err() {
+            proto_err!(stream:
+                "recv_data: content-length overflow; stream={:?}; len={:?}",
+                stream.id,
+                frame.payload().len(),
+            );
+            return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR));
+        }
+
+        if frame.is_end_stream() {
+            if stream.ensure_content_length_zero().is_err() {
+                proto_err!(stream:
+                    "recv_data: content-length underflow; stream={:?}; len={:?}",
+                    stream.id,
+                    frame.payload().len(),
+                );
+                return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR));
+            }
+
+            if stream.state.recv_close().is_err() {
+                proto_err!(conn: "recv_data: failed to transition to closed state; stream={:?}", stream.id);
+                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+            }
+        }
+
+        // Received a frame, but no receiver remains to consume it (see issue #648).
+        if !stream.is_recv {
+            tracing::trace!(
+                "recv_data; frame ignored on stream release {:?} for some time",
+                stream.id,
+            );
+            self.release_connection_capacity(sz, &mut None);
+            return Ok(());
+        }
+
+        // Update stream level flow control
+        stream
+            .recv_flow
+            .send_data(sz)
+            .map_err(proto::Error::library_go_away)?;
+
+        // Track the data as in-flight
+        stream.in_flight_recv_data += sz;
+
+        let event = Event::Data(frame.into_payload());
+
+        // Push the frame onto the recv buffer
+        stream.pending_recv.push_back(&mut self.buffer, event);
+        stream.notify_recv();
+
+        Ok(())
+    }
+
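+    /// Accounts for a DATA frame that will never reach the user by consuming
+    /// and then immediately releasing its connection window capacity.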
+    pub fn ignore_data(&mut self, sz: WindowSize) -> Result<(), Error> {
+        // Ensure that there is enough capacity on the connection...
+        self.consume_connection_window(sz)?;
+
+        // Since we are ignoring this frame, we aren't returning it to the
+        // user. That means they have no way to release the capacity back to
+        // the connection, so we have to release it automatically.
+        //
+        // This call doesn't send a WINDOW_UPDATE immediately, just marks
+        // the capacity as available to be reclaimed. When the available
+        // capacity meets a threshold, a WINDOW_UPDATE is then sent.
+        self.release_connection_capacity(sz, &mut None);
+        Ok(())
+    }
+
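+    /// Charges `sz` bytes against the connection-level receive window,
+    /// returning a FLOW_CONTROL_ERROR connection error if it is exceeded.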
+    pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), Error> {
+        if self.flow.window_size() < sz {
+            tracing::debug!(
+                "connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});",
+                self.flow.window_size(),
+                sz,
+            );
+            return Err(Error::library_go_away(Reason::FLOW_CONTROL_ERROR));
+        }
+
+        // Update connection level flow control
+        self.flow.send_data(sz).map_err(Error::library_go_away)?;
+
+        // Track the data as in-flight
+        self.in_flight_data += sz;
+        Ok(())
+    }
+
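+    /// Handles a received PUSH_PROMISE: reserves the promised stream,
+    /// validates the promised request, and queues it for the user.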
+    pub fn recv_push_promise(
+        &mut self,
+        frame: frame::PushPromise,
+        stream: &mut store::Ptr,
+    ) -> Result<(), Error> {
+        stream.state.reserve_remote()?;
+        if frame.is_over_size() {
+            // A frame is over size if the decoded header block was bigger than
+            // SETTINGS_MAX_HEADER_LIST_SIZE.
+            //
+            // > A server that receives a larger header block than it is willing
+            // > to handle can send an HTTP 431 (Request Header Fields Too
+            // > Large) status code [RFC6585]. A client can discard responses
+            // > that it cannot process.
+            //
+            // So, if peer is a server, we'll send a 431. In either case,
+            // an error is recorded, which will send a REFUSED_STREAM,
+            // since we don't want any of the data frames either.
+            tracing::debug!(
+                "stream error REFUSED_STREAM -- recv_push_promise: \
+                 headers frame is over size; promised_id={:?};",
+                frame.promised_id(),
+            );
+            return Err(Error::library_reset(
+                frame.promised_id(),
+                Reason::REFUSED_STREAM,
+            ));
+        }
+
+        let promised_id = frame.promised_id();
+        let (pseudo, fields) = frame.into_parts();
+        let req = crate::server::Peer::convert_poll_message(pseudo, fields, promised_id)?;
+
+        if let Err(e) = frame::PushPromise::validate_request(&req) {
+            use PushPromiseHeaderError::*;
+            match e {
+                NotSafeAndCacheable => proto_err!(
+                    stream:
+                    "recv_push_promise: method {} is not safe and cacheable; promised_id={:?}",
+                    req.method(),
+                    promised_id,
+                ),
+                InvalidContentLength(e) => proto_err!(
+                    stream:
+                    "recv_push_promise; promised request has invalid content-length {:?}; promised_id={:?}",
+                    e,
+                    promised_id,
+                ),
+            }
+            return Err(Error::library_reset(promised_id, Reason::PROTOCOL_ERROR));
+        }
+
+        use super::peer::PollMessage::*;
+        stream
+            .pending_recv
+            .push_back(&mut self.buffer, Event::Headers(Server(req)));
+        stream.notify_recv();
+        Ok(())
+    }
+
+    /// Ensures that `id` is not in the `Idle` state.
+    pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> {
+        if let Ok(next) = self.next_stream_id {
+            if id >= next {
+                tracing::debug!(
+                    "stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}",
+                    id
+                );
+                return Err(Reason::PROTOCOL_ERROR);
+            }
+        }
+        // if next_stream_id is overflowed, that's ok.
+
+        Ok(())
+    }
+
+    /// Handle remote sending an explicit RST_STREAM.
+    pub fn recv_reset(
+        &mut self,
+        frame: frame::Reset,
+        stream: &mut Stream,
+        counts: &mut Counts,
+    ) -> Result<(), Error> {
+        // Resetting a stream that the user hasn't accepted is possible,
+        // but should be done with care. These streams will continue
+        // to take up memory in the accept queue, but will no longer be
+        // counted as "concurrent" streams.
+        //
+        // So, we have a separate limit for these.
+        //
+        // See https://github.com/hyperium/hyper/issues/2877
+        if stream.is_pending_accept {
+            if counts.can_inc_num_remote_reset_streams() {
+                counts.inc_num_remote_reset_streams();
+            } else {
+                tracing::warn!(
+                    "recv_reset; remotely-reset pending-accept streams reached limit ({:?})",
+                    counts.max_remote_reset_streams(),
+                );
+                return Err(Error::library_go_away_data(
+                    Reason::ENHANCE_YOUR_CALM,
+                    "too_many_resets",
+                ));
+            }
+        }
+
+        // Notify the stream
+        stream.state.recv_reset(frame, stream.is_pending_send);
+
+        stream.notify_send();
+        stream.notify_recv();
+
+        Ok(())
+    }
+
+    /// Handle a connection-level error
+    pub fn handle_error(&mut self, err: &proto::Error, stream: &mut Stream) {
+        // Receive an error
+        stream.state.handle_error(err);
+
+        // If a receiver is waiting, notify it
+        stream.notify_send();
+        stream.notify_recv();
+    }
+
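+    /// Lowers the maximum stream ID that will still be processed to
+    /// `last_processed_id`; frames for higher-numbered streams are ignored.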
+    pub fn go_away(&mut self, last_processed_id: StreamId) {
+        assert!(self.max_stream_id >= last_processed_id);
+        self.max_stream_id = last_processed_id;
+    }
+
+    pub fn recv_eof(&mut self, stream: &mut Stream) {
+        stream.state.recv_eof();
+        stream.notify_send();
+        stream.notify_recv();
+    }
+
+    pub(super) fn clear_recv_buffer(&mut self, stream: &mut Stream) {
+        while stream.pending_recv.pop_front(&mut self.buffer).is_some() {
+            // drop it
+        }
+    }
+
+    /// Get the max ID of streams we can receive.
+    ///
+    /// This gets lowered if we send a GOAWAY frame.
+    pub fn max_stream_id(&self) -> StreamId {
+        self.max_stream_id
+    }
+
+    pub fn next_stream_id(&self) -> Result<StreamId, Error> {
+        if let Ok(id) = self.next_stream_id {
+            Ok(id)
+        } else {
+            Err(Error::library_go_away(Reason::PROTOCOL_ERROR))
+        }
+    }
+
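+    /// Returns true if a stream with the given ID could already have been
+    /// created by the peer, i.e. the ID is below the next expected stream ID.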
+    pub fn may_have_created_stream(&self, id: StreamId) -> bool {
+        if let Ok(next_id) = self.next_stream_id {
+            // Peer::is_local_init should have been called beforehand
+            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),);
+            id < next_id
+        } else {
+            true
+        }
+    }
+
+    pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) {
+        if let Ok(next_id) = self.next_stream_id {
+            // !Peer::is_local_init should have been called beforehand
+            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated());
+            if id >= next_id {
+                self.next_stream_id = id.next_id();
+            }
+        }
+    }
+
+    /// Returns an error if the remote peer is not allowed to reserve streams
+    /// (i.e. server push is disabled).
+    pub fn ensure_can_reserve(&self) -> Result<(), Error> {
+        if !self.is_push_enabled {
+            proto_err!(conn: "recv_push_promise: push is disabled");
+            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+        }
+
+        Ok(())
+    }
+
+    /// Add a locally reset stream to queue to be eventually reaped.
+    pub fn enqueue_reset_expiration(&mut self, stream: &mut store::Ptr, counts: &mut Counts) {
+        if !stream.state.is_local_error() || stream.is_pending_reset_expiration() {
+            return;
+        }
+
+        tracing::trace!("enqueue_reset_expiration; {:?}", stream.id);
+
+        if counts.can_inc_num_reset_streams() {
+            counts.inc_num_reset_streams();
+            self.pending_reset_expired.push(stream);
+        }
+    }
+
+    /// Send any pending refusals.
+    pub fn send_pending_refusal<T, B>(
+        &mut self,
+        cx: &mut Context,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        if let Some(stream_id) = self.refused {
+            ready!(dst.poll_ready(cx))?;
+
+            // Create the RST_STREAM frame
+            let frame = frame::Reset::new(stream_id, Reason::REFUSED_STREAM);
+
+            // Buffer the frame
+            dst.buffer(frame.into()).expect("invalid RST_STREAM frame");
+        }
+
+        self.refused = None;
+
+        Poll::Ready(Ok(()))
+    }
+
+    pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) {
+        if !self.pending_reset_expired.is_empty() {
+            let now = Instant::now();
+            let reset_duration = self.reset_duration;
+            while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| {
+                let reset_at = stream.reset_at.expect("reset_at must be set if in queue");
+                // rust-lang/rust#86470 tracks a bug in the standard library where `Instant`
+                // subtraction can panic (because, on some platforms, `Instant` isn't actually
+                // monotonic). We use a saturating operation to avoid this panic here.
+                now.saturating_duration_since(reset_at) > reset_duration
+            }) {
+                counts.transition_after(stream, true);
+            }
+        }
+    }
+
+    pub fn clear_queues(
+        &mut self,
+        clear_pending_accept: bool,
+        store: &mut Store,
+        counts: &mut Counts,
+    ) {
+        self.clear_stream_window_update_queue(store, counts);
+        self.clear_all_reset_streams(store, counts);
+
+        if clear_pending_accept {
+            self.clear_all_pending_accept(store, counts);
+        }
+    }
+
+    fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) {
+        while let Some(stream) = self.pending_window_updates.pop(store) {
+            counts.transition(stream, |_, stream| {
+                tracing::trace!("clear_stream_window_update_queue; stream={:?}", stream.id);
+            })
+        }
+    }
+
+    /// Called on EOF
+    fn clear_all_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) {
+        while let Some(stream) = self.pending_reset_expired.pop(store) {
+            counts.transition_after(stream, true);
+        }
+    }
+
+    fn clear_all_pending_accept(&mut self, store: &mut Store, counts: &mut Counts) {
+        while let Some(stream) = self.pending_accept.pop(store) {
+            counts.transition_after(stream, false);
+        }
+    }
+
+    pub fn poll_complete<T, B>(
+        &mut self,
+        cx: &mut Context,
+        store: &mut Store,
+        counts: &mut Counts,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        // Send any pending connection level window updates
+        ready!(self.send_connection_window_update(cx, dst))?;
+
+        // Send any pending stream level window updates
+        ready!(self.send_stream_window_updates(cx, store, counts, dst))?;
+
+        Poll::Ready(Ok(()))
+    }
+
+    /// Send connection level window update
+    fn send_connection_window_update<T, B>(
+        &mut self,
+        cx: &mut Context,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        if let Some(incr) = self.flow.unclaimed_capacity() {
+            let frame = frame::WindowUpdate::new(StreamId::zero(), incr);
+
+            // Ensure the codec has capacity
+            ready!(dst.poll_ready(cx))?;
+
+            // Buffer the WINDOW_UPDATE frame
+            dst.buffer(frame.into())
+                .expect("invalid WINDOW_UPDATE frame");
+
+            // Update flow control
+            self.flow
+                .inc_window(incr)
+                .expect("unexpected flow control state");
+        }
+
+        Poll::Ready(Ok(()))
+    }
+
+    /// Send stream level window update
+    pub fn send_stream_window_updates<T, B>(
+        &mut self,
+        cx: &mut Context,
+        store: &mut Store,
+        counts: &mut Counts,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        loop {
+            // Ensure the codec has capacity
+            ready!(dst.poll_ready(cx))?;
+
+            // Get the next stream
+            let stream = match self.pending_window_updates.pop(store) {
+                Some(stream) => stream,
+                None => return Poll::Ready(Ok(())),
+            };
+
+            counts.transition(stream, |_, stream| {
+                tracing::trace!("pending_window_updates -- pop; stream={:?}", stream.id);
+                debug_assert!(!stream.is_pending_window_update);
+
+                if !stream.state.is_recv_streaming() {
+                    // No need to send window updates on the stream if the stream is
+                    // no longer receiving data.
+                    //
+                    // TODO: is this correct? We could possibly send a window
+                    // update on a ReservedRemote stream if we already know
+                    // we want to stream the data faster...
+                    return;
+                }
+
+                // TODO: de-dup
+                if let Some(incr) = stream.recv_flow.unclaimed_capacity() {
+                    // Create the WINDOW_UPDATE frame
+                    let frame = frame::WindowUpdate::new(stream.id, incr);
+
+                    // Buffer it
+                    dst.buffer(frame.into())
+                        .expect("invalid WINDOW_UPDATE frame");
+
+                    // Update flow control
+                    stream
+                        .recv_flow
+                        .inc_window(incr)
+                        .expect("unexpected flow control state");
+                }
+            })
+        }
+    }
+
+    pub fn next_incoming(&mut self, store: &mut Store) -> Option<store::Key> {
+        self.pending_accept.pop(store).map(|ptr| ptr.key())
+    }
+
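+    /// Polls for the next DATA payload buffered on the stream. Yields `None`
+    /// once only trailers remain or the receive side has closed.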
+    pub fn poll_data(
+        &mut self,
+        cx: &Context,
+        stream: &mut Stream,
+    ) -> Poll<Option<Result<Bytes, proto::Error>>> {
+        match stream.pending_recv.pop_front(&mut self.buffer) {
+            Some(Event::Data(payload)) => Poll::Ready(Some(Ok(payload))),
+            Some(event) => {
+                // Frame is a trailer; push it back for `poll_trailers`.
+                stream.pending_recv.push_front(&mut self.buffer, event);
+
+                // Notify the recv task. This is done just in case
+                // `poll_trailers` was called.
+                //
+                // It is very likely that `notify_recv` will just be a no-op (as
+                // the task will be None), so this isn't really much of a
+                // performance concern. It also means we don't have to track
+                // state to see if `poll_trailers` was called before `poll_data`
+                // returned `None`.
+                stream.notify_recv();
+
+                // No more data frames
+                Poll::Ready(None)
+            }
+            None => self.schedule_recv(cx, stream),
+        }
+    }
+
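+    /// Polls for buffered trailers, staying pending while DATA frames are
+    /// still queued ahead of them.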
+    pub fn poll_trailers(
+        &mut self,
+        cx: &Context,
+        stream: &mut Stream,
+    ) -> Poll<Option<Result<HeaderMap, proto::Error>>> {
+        match stream.pending_recv.pop_front(&mut self.buffer) {
+            Some(Event::Trailers(trailers)) => Poll::Ready(Some(Ok(trailers))),
+            Some(event) => {
+                // Frame is not trailers; not ready to poll trailers yet.
+                stream.pending_recv.push_front(&mut self.buffer, event);
+
+                Poll::Pending
+            }
+            None => self.schedule_recv(cx, stream),
+        }
+    }
+
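+    /// Parks the current task until more frames arrive, or yields `None` if
+    /// the receive side is closed and nothing more will be received.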
+    fn schedule_recv<T>(
+        &mut self,
+        cx: &Context,
+        stream: &mut Stream,
+    ) -> Poll<Option<Result<T, proto::Error>>> {
+        if stream.state.ensure_recv_open()? {
+            // Request to get notified once more frames arrive
+            stream.recv_task = Some(cx.waker().clone());
+            Poll::Pending
+        } else {
+            // No more frames will be received
+            Poll::Ready(None)
+        }
+    }
+}
+
+// ===== impl Open =====
+
+impl Open {
+    pub fn is_push_promise(&self) -> bool {
+        matches!(*self, Self::PushPromise)
+    }
+}
+
+// ===== impl RecvHeaderBlockError =====
+
+impl<T> From<Error> for RecvHeaderBlockError<T> {
+    fn from(err: Error) -> Self {
+        RecvHeaderBlockError::State(err)
+    }
+}
diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs
new file mode 100644
index 0000000..626e61a
--- /dev/null
+++ b/src/proto/streams/send.rs
@@ -0,0 +1,585 @@
+use super::{
+    store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId,
+    StreamIdOverflow, WindowSize,
+};
+use crate::codec::UserError;
+use crate::frame::{self, Reason};
+use crate::proto::{self, Error, Initiator};
+
+use bytes::Buf;
+use tokio::io::AsyncWrite;
+
+use std::cmp::Ordering;
+use std::io;
+use std::task::{Context, Poll, Waker};
+
+/// Manages state transitions related to outbound frames.
+#[derive(Debug)]
+pub(super) struct Send {
+    /// Stream identifier to use for next initialized stream.
+    next_stream_id: Result<StreamId, StreamIdOverflow>,
+
+    /// Any streams with a higher ID are ignored.
+    ///
+    /// This starts as MAX, but is lowered when a GOAWAY is received.
+    ///
+    /// > After sending a GOAWAY frame, the sender can discard frames for
+    /// > streams initiated by the receiver with identifiers higher than
+    /// > the identified last stream.
+    max_stream_id: StreamId,
+
+    /// Initial window size of locally initiated streams
+    init_window_sz: WindowSize,
+
+    /// Prioritization layer
+    prioritize: Prioritize,
+
+    is_push_enabled: bool,
+
+    /// If extended connect protocol is enabled.
+    is_extended_connect_protocol_enabled: bool,
+}
+
+/// A value to detect which public API has called `poll_reset`.
+#[derive(Debug)]
+pub(crate) enum PollReset {
+    AwaitingHeaders,
+    Streaming,
+}
+
+impl Send {
+    /// Create a new `Send`
+    pub fn new(config: &Config) -> Self {
+        Send {
+            init_window_sz: config.remote_init_window_sz,
+            max_stream_id: StreamId::MAX,
+            next_stream_id: Ok(config.local_next_stream_id),
+            prioritize: Prioritize::new(config),
+            is_push_enabled: true,
+            is_extended_connect_protocol_enabled: false,
+        }
+    }
+
+    /// Returns the initial send window size
+    pub fn init_window_sz(&self) -> WindowSize {
+        self.init_window_sz
+    }
+
+    pub fn open(&mut self) -> Result<StreamId, UserError> {
+        let stream_id = self.ensure_next_stream_id()?;
+        self.next_stream_id = stream_id.next_id();
+        Ok(stream_id)
+    }
+
+    pub fn reserve_local(&mut self) -> Result<StreamId, UserError> {
+        let stream_id = self.ensure_next_stream_id()?;
+        self.next_stream_id = stream_id.next_id();
+        Ok(stream_id)
+    }
+
+    fn check_headers(fields: &http::HeaderMap) -> Result<(), UserError> {
+        // 8.1.2.2. Connection-Specific Header Fields
+        if fields.contains_key(http::header::CONNECTION)
+            || fields.contains_key(http::header::TRANSFER_ENCODING)
+            || fields.contains_key(http::header::UPGRADE)
+            || fields.contains_key("keep-alive")
+            || fields.contains_key("proxy-connection")
+        {
+            tracing::debug!("illegal connection-specific headers found");
+            return Err(UserError::MalformedHeaders);
+        } else if let Some(te) = fields.get(http::header::TE) {
+            if te != "trailers" {
+                tracing::debug!("illegal connection-specific headers found");
+                return Err(UserError::MalformedHeaders);
+            }
+        }
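+        // At this point the header map carries no connection-specific
+        // headers: e.g. `connection: keep-alive` or `te: gzip` would have
+        // been rejected above with `UserError::MalformedHeaders`, while
+        // `te: trailers` is the one value allowed through (RFC 7540 §8.1.2.2).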
+        Ok(())
+    }
+
+    pub fn send_push_promise<B>(
+        &mut self,
+        frame: frame::PushPromise,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        task: &mut Option<Waker>,
+    ) -> Result<(), UserError> {
+        if !self.is_push_enabled {
+            return Err(UserError::PeerDisabledServerPush);
+        }
+
+        tracing::trace!(
+            "send_push_promise; frame={:?}; init_window={:?}",
+            frame,
+            self.init_window_sz
+        );
+
+        Self::check_headers(frame.fields())?;
+
+        // Queue the frame for sending
+        self.prioritize
+            .queue_frame(frame.into(), buffer, stream, task);
+
+        Ok(())
+    }
+
+    pub fn send_headers<B>(
+        &mut self,
+        frame: frame::Headers,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+        task: &mut Option<Waker>,
+    ) -> Result<(), UserError> {
+        tracing::trace!(
+            "send_headers; frame={:?}; init_window={:?}",
+            frame,
+            self.init_window_sz
+        );
+
+        Self::check_headers(frame.fields())?;
+
+        let end_stream = frame.is_end_stream();
+
+        // Update the state
+        stream.state.send_open(end_stream)?;
+
+        let mut pending_open = false;
+        if counts.peer().is_local_init(frame.stream_id()) && !stream.is_pending_push {
+            self.prioritize.queue_open(stream);
+            pending_open = true;
+        }
+
+        // Queue the frame for sending
+        //
+        // This call expects that, since new streams are in the open queue, new
+        // streams won't be pushed on pending_send.
+        self.prioritize
+            .queue_frame(frame.into(), buffer, stream, task);
+
+        // Need to notify the connection when pushing onto pending_open since
+        // queue_frame only notifies for pending_send.
+        if pending_open {
+            if let Some(task) = task.take() {
+                task.wake();
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Send an explicit RST_STREAM frame
+    pub fn send_reset<B>(
+        &mut self,
+        reason: Reason,
+        initiator: Initiator,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+        task: &mut Option<Waker>,
+    ) {
+        let is_reset = stream.state.is_reset();
+        let is_closed = stream.state.is_closed();
+        let is_empty = stream.pending_send.is_empty();
+        let stream_id = stream.id;
+
+        tracing::trace!(
+            "send_reset(..., reason={:?}, initiator={:?}, stream={:?}, ..., \
+             is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \
+             state={:?} \
+             ",
+            reason,
+            initiator,
+            stream_id,
+            is_reset,
+            is_closed,
+            is_empty,
+            stream.state
+        );
+
+        if is_reset {
+            // Don't double reset
+            tracing::trace!(
+                " -> not sending RST_STREAM ({:?} is already reset)",
+                stream_id
+            );
+            return;
+        }
+
+        // Transition the state to reset no matter what.
+        stream.state.set_reset(stream_id, reason, initiator);
+
+        // If closed AND the send queue is flushed, then the stream cannot be
+        // reset explicitly, either. Implicit resets can still be queued.
+        if is_closed && is_empty {
+            tracing::trace!(
+                " -> not sending explicit RST_STREAM ({:?} was closed \
+                 and send queue was flushed)",
+                stream_id
+            );
+            return;
+        }
+
+        // Clear all pending outbound frames.
+        // Note that we don't call `self.recv_err` because we want to enqueue
+        // the reset frame before transitioning the stream inside
+        // `reclaim_all_capacity`.
+        self.prioritize.clear_queue(buffer, stream);
+
+        let frame = frame::Reset::new(stream.id, reason);
+
+        tracing::trace!("send_reset -- queueing; frame={:?}", frame);
+        self.prioritize
+            .queue_frame(frame.into(), buffer, stream, task);
+        self.prioritize.reclaim_all_capacity(stream, counts);
+    }
+
+    pub fn schedule_implicit_reset(
+        &mut self,
+        stream: &mut store::Ptr,
+        reason: Reason,
+        counts: &mut Counts,
+        task: &mut Option<Waker>,
+    ) {
+        if stream.state.is_closed() {
+            // Stream is already closed, nothing more to do
+            return;
+        }
+
+        stream.state.set_scheduled_reset(reason);
+
+        self.prioritize.reclaim_reserved_capacity(stream, counts);
+        self.prioritize.schedule_send(stream, task);
+    }
+
+    pub fn send_data<B>(
+        &mut self,
+        frame: frame::Data<B>,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+        task: &mut Option<Waker>,
+    ) -> Result<(), UserError>
+    where
+        B: Buf,
+    {
+        self.prioritize
+            .send_data(frame, buffer, stream, counts, task)
+    }
+
+    pub fn send_trailers<B>(
+        &mut self,
+        frame: frame::Headers,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+        task: &mut Option<Waker>,
+    ) -> Result<(), UserError> {
+        // TODO: Should this logic be moved into state.rs?
+        if !stream.state.is_send_streaming() {
+            return Err(UserError::UnexpectedFrameType);
+        }
+
+        stream.state.send_close();
+
+        tracing::trace!("send_trailers -- queuing; frame={:?}", frame);
+        self.prioritize
+            .queue_frame(frame.into(), buffer, stream, task);
+
+        // Release any excess capacity
+        self.prioritize.reserve_capacity(0, stream, counts);
+
+        Ok(())
+    }
+
+    pub fn poll_complete<T, B>(
+        &mut self,
+        cx: &mut Context,
+        buffer: &mut Buffer<Frame<B>>,
+        store: &mut Store,
+        counts: &mut Counts,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        self.prioritize
+            .poll_complete(cx, buffer, store, counts, dst)
+    }
+
+    /// Request capacity to send data
+    pub fn reserve_capacity(
+        &mut self,
+        capacity: WindowSize,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+    ) {
+        self.prioritize.reserve_capacity(capacity, stream, counts)
+    }
+
+    pub fn poll_capacity(
+        &mut self,
+        cx: &Context,
+        stream: &mut store::Ptr,
+    ) -> Poll<Option<Result<WindowSize, UserError>>> {
+        if !stream.state.is_send_streaming() {
+            return Poll::Ready(None);
+        }
+
+        if !stream.send_capacity_inc {
+            stream.wait_send(cx);
+            return Poll::Pending;
+        }
+
+        stream.send_capacity_inc = false;
+
+        Poll::Ready(Some(Ok(self.capacity(stream))))
+    }
+
+    /// Current available stream send capacity
+    pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize {
+        stream.capacity(self.prioritize.max_buffer_size())
+    }
+
+    pub fn poll_reset(
+        &self,
+        cx: &Context,
+        stream: &mut Stream,
+        mode: PollReset,
+    ) -> Poll<Result<Reason, crate::Error>> {
+        match stream.state.ensure_reason(mode)? {
+            Some(reason) => Poll::Ready(Ok(reason)),
+            None => {
+                stream.wait_send(cx);
+                Poll::Pending
+            }
+        }
+    }
+
+    pub fn recv_connection_window_update(
+        &mut self,
+        frame: frame::WindowUpdate,
+        store: &mut Store,
+        counts: &mut Counts,
+    ) -> Result<(), Reason> {
+        self.prioritize
+            .recv_connection_window_update(frame.size_increment(), store, counts)
+    }
+
+    pub fn recv_stream_window_update<B>(
+        &mut self,
+        sz: WindowSize,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+        task: &mut Option<Waker>,
+    ) -> Result<(), Reason> {
+        if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) {
+            tracing::debug!("recv_stream_window_update !!; err={:?}", e);
+
+            self.send_reset(
+                Reason::FLOW_CONTROL_ERROR,
+                Initiator::Library,
+                buffer,
+                stream,
+                counts,
+                task,
+            );
+
+            return Err(e);
+        }
+
+        Ok(())
+    }
+
+    pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), Error> {
+        if last_stream_id > self.max_stream_id {
+            // The remote endpoint sent a `GOAWAY` frame indicating a stream
+            // that we never sent, or that we have already terminated on account
+            // of a previous `GOAWAY` frame. In either case, that is illegal.
+            // (When sending multiple `GOAWAY`s, "Endpoints MUST NOT increase
+            // the value they send in the last stream identifier, since the
+            // peers might already have retried unprocessed requests on another
+            // connection.")
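+            //
+            // For example, if an earlier GOAWAY carried last_stream_id = 7
+            // (lowering `max_stream_id` to 7), a later GOAWAY with
+            // last_stream_id = 9 raises the value again and is answered with
+            // a PROTOCOL_ERROR GOAWAY of our own; last_stream_id = 5 would
+            // instead simply lower `max_stream_id` further.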
+            proto_err!(conn:
+                "recv_go_away: last_stream_id ({:?}) > max_stream_id ({:?})",
+                last_stream_id, self.max_stream_id,
+            );
+            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+        }
+
+        self.max_stream_id = last_stream_id;
+        Ok(())
+    }
+
+    pub fn handle_error<B>(
+        &mut self,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+    ) {
+        // Clear all pending outbound frames
+        self.prioritize.clear_queue(buffer, stream);
+        self.prioritize.reclaim_all_capacity(stream, counts);
+    }
+
+    pub fn apply_remote_settings<B>(
+        &mut self,
+        settings: &frame::Settings,
+        buffer: &mut Buffer<Frame<B>>,
+        store: &mut Store,
+        counts: &mut Counts,
+        task: &mut Option<Waker>,
+    ) -> Result<(), Error> {
+        if let Some(val) = settings.is_extended_connect_protocol_enabled() {
+            self.is_extended_connect_protocol_enabled = val;
+        }
+
+        // Applies an update to the remote endpoint's initial window size.
+        //
+        // Per RFC 7540 §6.9.2:
+        //
+        // In addition to changing the flow-control window for streams that are
+        // not yet active, a SETTINGS frame can alter the initial flow-control
+        // window size for streams with active flow-control windows (that is,
+        // streams in the "open" or "half-closed (remote)" state). When the
+        // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust
+        // the size of all stream flow-control windows that it maintains by the
+        // difference between the new value and the old value.
+        //
+        // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available
+        // space in a flow-control window to become negative. A sender MUST
+        // track the negative flow-control window and MUST NOT send new
+        // flow-controlled frames until it receives WINDOW_UPDATE frames that
+        // cause the flow-control window to become positive.
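+        //
+        // As a concrete example: if the old value was 65_535 and the peer now
+        // advertises 16_384, every open stream's send window is decremented
+        // by 49_151 below. A stream that had already consumed 60_000 bytes of
+        // its window (5_535 remaining) then sits at -43_616 and may not send
+        // any more flow-controlled frames until WINDOW_UPDATE frames bring
+        // the window back above zero.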
+        if let Some(val) = settings.initial_window_size() {
+            let old_val = self.init_window_sz;
+            self.init_window_sz = val;
+
+            match val.cmp(&old_val) {
+                Ordering::Less => {
+                    // We must decrease the (remote) window on every open stream.
+                    let dec = old_val - val;
+                    tracing::trace!("decrementing all windows; dec={}", dec);
+
+                    let mut total_reclaimed = 0;
+                    store.try_for_each(|mut stream| {
+                        let stream = &mut *stream;
+
+                        tracing::trace!(
+                            "decrementing stream window; id={:?}; decr={}; flow={:?}",
+                            stream.id,
+                            dec,
+                            stream.send_flow
+                        );
+
+                        // TODO: this decrement can underflow based on received frames!
+                        stream
+                            .send_flow
+                            .dec_send_window(dec)
+                            .map_err(proto::Error::library_go_away)?;
+
+                        // It's possible that decreasing the window causes
+                        // `window_size` (the stream-specific window) to fall below
+                        // `available` (the portion of the connection-level window
+                        // that we have allocated to the stream).
+                        // In this case, we should take that excess allocation away
+                        // and reassign it to other streams.
+                        let window_size = stream.send_flow.window_size();
+                        let available = stream.send_flow.available().as_size();
+                        let reclaimed = if available > window_size {
+                            // Drop down to `window_size`.
+                            let reclaim = available - window_size;
+                            stream
+                                .send_flow
+                                .claim_capacity(reclaim)
+                                .map_err(proto::Error::library_go_away)?;
+                            total_reclaimed += reclaim;
+                            reclaim
+                        } else {
+                            0
+                        };
+
+                        tracing::trace!(
+                            "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}",
+                            stream.id,
+                            dec,
+                            reclaimed,
+                            stream.send_flow
+                        );
+
+                        // TODO: Should this notify the producer when the capacity
+                        // of a stream is reduced? Maybe it should if the capacity
+                        // is reduced to zero, allowing the producer to stop work.
+
+                        Ok::<_, proto::Error>(())
+                    })?;
+
+                    self.prioritize
+                        .assign_connection_capacity(total_reclaimed, store, counts);
+                }
+                Ordering::Greater => {
+                    let inc = val - old_val;
+
+                    store.try_for_each(|mut stream| {
+                        self.recv_stream_window_update(inc, buffer, &mut stream, counts, task)
+                            .map_err(Error::library_go_away)
+                    })?;
+                }
+                Ordering::Equal => (),
+            }
+        }
+
+        if let Some(val) = settings.is_push_enabled() {
+            self.is_push_enabled = val
+        }
+
+        Ok(())
+    }
+
+    pub fn clear_queues(&mut self, store: &mut Store, counts: &mut Counts) {
+        self.prioritize.clear_pending_capacity(store, counts);
+        self.prioritize.clear_pending_send(store, counts);
+        self.prioritize.clear_pending_open(store, counts);
+    }
+
+    pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> {
+        if let Ok(next) = self.next_stream_id {
+            if id >= next {
+                return Err(Reason::PROTOCOL_ERROR);
+            }
+        }
+        // if next_stream_id is overflowed, that's ok.
+
+        Ok(())
+    }
+
+    pub fn ensure_next_stream_id(&self) -> Result<StreamId, UserError> {
+        self.next_stream_id
+            .map_err(|_| UserError::OverflowedStreamId)
+    }
+
+    pub fn may_have_created_stream(&self, id: StreamId) -> bool {
+        if let Ok(next_id) = self.next_stream_id {
+            // Peer::is_local_init should have been called beforehand
+            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated());
+            id < next_id
+        } else {
+            true
+        }
+    }
+
+    pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) {
+        if let Ok(next_id) = self.next_stream_id {
+            // Peer::is_local_init should have been called beforehand
+            debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated());
+            if id >= next_id {
+                self.next_stream_id = id.next_id();
+            }
+        }
+    }
+
+    pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
+        self.is_extended_connect_protocol_enabled
+    }
+}
diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs
new file mode 100644
index 0000000..5256f09
--- /dev/null
+++ b/src/proto/streams/state.rs
@@ -0,0 +1,469 @@
+use std::io;
+
+use crate::codec::UserError;
+use crate::frame::{self, Reason, StreamId};
+use crate::proto::{self, Error, Initiator, PollReset};
+
+use self::Inner::*;
+use self::Peer::*;
+
+/// Represents the state of an H2 stream
+///
+/// ```not_rust
+///                              +--------+
+///                      send PP |        | recv PP
+///                     ,--------|  idle  |--------.
+///                    /         |        |         \
+///                   v          +--------+          v
+///            +----------+          |           +----------+
+///            |          |          | send H /  |          |
+///     ,------| reserved |          | recv H    | reserved |------.
+///     |      | (local)  |          |           | (remote) |      |
+///     |      +----------+          v           +----------+      |
+///     |          |             +--------+             |          |
+///     |          |     recv ES |        | send ES     |          |
+///     |   send H |     ,-------|  open  |-------.     | recv H   |
+///     |          |    /        |        |        \    |          |
+///     |          v   v         +--------+         v   v          |
+///     |      +----------+          |           +----------+      |
+///     |      |   half   |          |           |   half   |      |
+///     |      |  closed  |          | send R /  |  closed  |      |
+///     |      | (remote) |          | recv R    | (local)  |      |
+///     |      +----------+          |           +----------+      |
+///     |           |                |                 |           |
+///     |           | send ES /      |       recv ES / |           |
+///     |           | send R /       v        send R / |           |
+///     |           | recv R     +--------+   recv R   |           |
+///     | send R /  `----------->|        |<-----------'  send R / |
+///     | recv R                 | closed |               recv R   |
+///     `----------------------->|        |<----------------------'
+///                              +--------+
+///
+///        send:   endpoint sends this frame
+///        recv:   endpoint receives this frame
+///
+///        H:  HEADERS frame (with implied CONTINUATIONs)
+///        PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+///        ES: END_STREAM flag
+///        R:  RST_STREAM frame
+/// ```
+#[derive(Debug, Clone)]
+pub struct State {
+    inner: Inner,
+}
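+
+// As a rough sketch of a client-initiated exchange in terms of the diagram
+// above (using the methods defined further down in this file;
+// `response_headers` is just a placeholder for a received HEADERS frame):
+//
+//     let mut state = State::default();     // Idle
+//     state.send_open(false)?;              // Open { local: Streaming, .. }
+//     state.recv_open(&response_headers)?;  // remote half starts streaming
+//     state.recv_close()?;                  // HalfClosedRemote(Streaming)
+//     state.send_close();                   // Closed(Cause::EndStream)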
+
+#[derive(Debug, Clone)]
+enum Inner {
+    Idle,
+    // TODO: these states shouldn't count against concurrency limits:
+    ReservedLocal,
+    ReservedRemote,
+    Open { local: Peer, remote: Peer },
+    HalfClosedLocal(Peer), // TODO: explicitly name this value
+    HalfClosedRemote(Peer),
+    Closed(Cause),
+}
+
+#[derive(Debug, Copy, Clone, Default)]
+enum Peer {
+    #[default]
+    AwaitingHeaders,
+    Streaming,
+}
+
+#[derive(Debug, Clone)]
+enum Cause {
+    EndStream,
+    Error(Error),
+
+    /// This indicates to the connection that a reset frame must be sent out
+    /// once the send queue has been flushed.
+    ///
+    /// Examples of when this could happen:
+    /// - User drops all references to a stream, so we want to CANCEL it.
+    /// - Header block size was too large, so we want to REFUSE, possibly
+    ///   after sending a 431 response frame.
+    ScheduledLibraryReset(Reason),
+}
+
+impl State {
+    /// Opens the send-half of a stream if it is not already open.
+    pub fn send_open(&mut self, eos: bool) -> Result<(), UserError> {
+        let local = Streaming;
+
+        self.inner = match self.inner {
+            Idle => {
+                if eos {
+                    HalfClosedLocal(AwaitingHeaders)
+                } else {
+                    Open {
+                        local,
+                        remote: AwaitingHeaders,
+                    }
+                }
+            }
+            Open {
+                local: AwaitingHeaders,
+                remote,
+            } => {
+                if eos {
+                    HalfClosedLocal(remote)
+                } else {
+                    Open { local, remote }
+                }
+            }
+            HalfClosedRemote(AwaitingHeaders) | ReservedLocal => {
+                if eos {
+                    Closed(Cause::EndStream)
+                } else {
+                    HalfClosedRemote(local)
+                }
+            }
+            _ => {
+                // All other transitions result in a protocol error
+                return Err(UserError::UnexpectedFrameType);
+            }
+        };
+
+        Ok(())
+    }
+
+    /// Opens the receive-half of the stream when a HEADERS frame is received.
+    ///
+    /// Returns true if this transitions the state to Open.
+    pub fn recv_open(&mut self, frame: &frame::Headers) -> Result<bool, Error> {
+        let mut initial = false;
+        let eos = frame.is_end_stream();
+
+        self.inner = match self.inner {
+            Idle => {
+                initial = true;
+
+                if eos {
+                    HalfClosedRemote(AwaitingHeaders)
+                } else {
+                    Open {
+                        local: AwaitingHeaders,
+                        remote: if frame.is_informational() {
+                            tracing::trace!("skipping 1xx response headers");
+                            AwaitingHeaders
+                        } else {
+                            Streaming
+                        },
+                    }
+                }
+            }
+            ReservedRemote => {
+                initial = true;
+
+                if eos {
+                    Closed(Cause::EndStream)
+                } else if frame.is_informational() {
+                    tracing::trace!("skipping 1xx response headers");
+                    ReservedRemote
+                } else {
+                    HalfClosedLocal(Streaming)
+                }
+            }
+            Open {
+                local,
+                remote: AwaitingHeaders,
+            } => {
+                if eos {
+                    HalfClosedRemote(local)
+                } else {
+                    Open {
+                        local,
+                        remote: if frame.is_informational() {
+                            tracing::trace!("skipping 1xx response headers");
+                            AwaitingHeaders
+                        } else {
+                            Streaming
+                        },
+                    }
+                }
+            }
+            HalfClosedLocal(AwaitingHeaders) => {
+                if eos {
+                    Closed(Cause::EndStream)
+                } else if frame.is_informational() {
+                    tracing::trace!("skipping 1xx response headers");
+                    HalfClosedLocal(AwaitingHeaders)
+                } else {
+                    HalfClosedLocal(Streaming)
+                }
+            }
+            ref state => {
+                // All other transitions result in a protocol error
+                proto_err!(conn: "recv_open: in unexpected state {:?}", state);
+                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+            }
+        };
+
+        Ok(initial)
+    }
+
+    /// Transition from Idle -> ReservedRemote
+    pub fn reserve_remote(&mut self) -> Result<(), Error> {
+        match self.inner {
+            Idle => {
+                self.inner = ReservedRemote;
+                Ok(())
+            }
+            ref state => {
+                proto_err!(conn: "reserve_remote: in unexpected state {:?}", state);
+                Err(Error::library_go_away(Reason::PROTOCOL_ERROR))
+            }
+        }
+    }
+
+    /// Transition from Idle -> ReservedLocal
+    pub fn reserve_local(&mut self) -> Result<(), UserError> {
+        match self.inner {
+            Idle => {
+                self.inner = ReservedLocal;
+                Ok(())
+            }
+            _ => Err(UserError::UnexpectedFrameType),
+        }
+    }
+
+    /// Indicates that the remote side will not send more data to the local.
+    pub fn recv_close(&mut self) -> Result<(), Error> {
+        match self.inner {
+            Open { local, .. } => {
+                // The remote side will continue to receive data.
+                tracing::trace!("recv_close: Open => HalfClosedRemote({:?})", local);
+                self.inner = HalfClosedRemote(local);
+                Ok(())
+            }
+            HalfClosedLocal(..) => {
+                tracing::trace!("recv_close: HalfClosedLocal => Closed");
+                self.inner = Closed(Cause::EndStream);
+                Ok(())
+            }
+            ref state => {
+                proto_err!(conn: "recv_close: in unexpected state {:?}", state);
+                Err(Error::library_go_away(Reason::PROTOCOL_ERROR))
+            }
+        }
+    }
+
+    /// The remote explicitly sent a RST_STREAM.
+    ///
+    /// # Arguments
+    /// - `frame`: the received RST_STREAM frame.
+    /// - `queued`: true if this stream has frames in the pending send queue.
+    pub fn recv_reset(&mut self, frame: frame::Reset, queued: bool) {
+        match self.inner {
+            // If the stream is already in a `Closed` state, do nothing,
+            // provided that there are no frames still in the send queue.
+            Closed(..) if !queued => {}
+            // A notionally `Closed` stream may still have queued frames in
+            // the following cases:
+            //
+            // - if the cause is `Cause::ScheduledLibraryReset(..)` (i.e. we have not
+            //   actually closed the stream yet).
+            // - if the cause is `Cause::EndStream`: we transition to this
+            //   state when an EOS frame is *enqueued* (so that it's invalid
+            //   to enqueue more frames), not when the EOS frame is *sent*;
+            //   therefore, there may still be frames ahead of the EOS frame
+            //   in the send queue.
+            //
+            // In either of these cases, we want to overwrite the stream's
+            // previous state with the received RST_STREAM, so that the queue
+            // will be cleared by `Prioritize::pop_frame`.
+            ref state => {
+                tracing::trace!(
+                    "recv_reset; frame={:?}; state={:?}; queued={:?}",
+                    frame,
+                    state,
+                    queued
+                );
+                self.inner = Closed(Cause::Error(Error::remote_reset(
+                    frame.stream_id(),
+                    frame.reason(),
+                )));
+            }
+        }
+    }
+
+    /// Handle a connection-level error.
+    pub fn handle_error(&mut self, err: &proto::Error) {
+        match self.inner {
+            Closed(..) => {}
+            _ => {
+                tracing::trace!("handle_error; err={:?}", err);
+                self.inner = Closed(Cause::Error(err.clone()));
+            }
+        }
+    }
+
+    pub fn recv_eof(&mut self) {
+        match self.inner {
+            Closed(..) => {}
+            ref state => {
+                tracing::trace!("recv_eof; state={:?}", state);
+                self.inner = Closed(Cause::Error(
+                    io::Error::new(
+                        io::ErrorKind::BrokenPipe,
+                        "stream closed because of a broken pipe",
+                    )
+                    .into(),
+                ));
+            }
+        }
+    }
+
+    /// Indicates that the local side will not send more data to the remote.
+    pub fn send_close(&mut self) {
+        match self.inner {
+            Open { remote, .. } => {
+                // The local side will continue to receive data.
+                tracing::trace!("send_close: Open => HalfClosedLocal({:?})", remote);
+                self.inner = HalfClosedLocal(remote);
+            }
+            HalfClosedRemote(..) => {
+                tracing::trace!("send_close: HalfClosedRemote => Closed");
+                self.inner = Closed(Cause::EndStream);
+            }
+            ref state => panic!("send_close: unexpected state {:?}", state),
+        }
+    }
+
+    /// Set the stream state to reset locally.
+    pub fn set_reset(&mut self, stream_id: StreamId, reason: Reason, initiator: Initiator) {
+        self.inner = Closed(Cause::Error(Error::Reset(stream_id, reason, initiator)));
+    }
+
+    /// Set the stream state to a scheduled reset.
+    pub fn set_scheduled_reset(&mut self, reason: Reason) {
+        debug_assert!(!self.is_closed());
+        self.inner = Closed(Cause::ScheduledLibraryReset(reason));
+    }
+
+    pub fn get_scheduled_reset(&self) -> Option<Reason> {
+        match self.inner {
+            Closed(Cause::ScheduledLibraryReset(reason)) => Some(reason),
+            _ => None,
+        }
+    }
+
+    pub fn is_scheduled_reset(&self) -> bool {
+        matches!(self.inner, Closed(Cause::ScheduledLibraryReset(..)))
+    }
+
+    pub fn is_local_error(&self) -> bool {
+        match self.inner {
+            Closed(Cause::Error(ref e)) => e.is_local(),
+            Closed(Cause::ScheduledLibraryReset(..)) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_remote_reset(&self) -> bool {
+        matches!(
+            self.inner,
+            Closed(Cause::Error(Error::Reset(_, _, Initiator::Remote)))
+        )
+    }
+
+    /// Returns true if the stream is already reset.
+    pub fn is_reset(&self) -> bool {
+        match self.inner {
+            Closed(Cause::EndStream) => false,
+            Closed(_) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_send_streaming(&self) -> bool {
+        matches!(
+            self.inner,
+            Open {
+                local: Streaming,
+                ..
+            } | HalfClosedRemote(Streaming)
+        )
+    }
+
+    /// Returns true when the stream is in a state to receive headers
+    pub fn is_recv_headers(&self) -> bool {
+        matches!(
+            self.inner,
+            Idle | Open {
+                remote: AwaitingHeaders,
+                ..
+            } | HalfClosedLocal(AwaitingHeaders)
+                | ReservedRemote
+        )
+    }
+
+    pub fn is_recv_streaming(&self) -> bool {
+        matches!(
+            self.inner,
+            Open {
+                remote: Streaming,
+                ..
+            } | HalfClosedLocal(Streaming)
+        )
+    }
+
+    pub fn is_closed(&self) -> bool {
+        matches!(self.inner, Closed(_))
+    }
+
+    pub fn is_recv_closed(&self) -> bool {
+        matches!(
+            self.inner,
+            Closed(..) | HalfClosedRemote(..) | ReservedLocal
+        )
+    }
+
+    pub fn is_send_closed(&self) -> bool {
+        matches!(
+            self.inner,
+            Closed(..) | HalfClosedLocal(..) | ReservedRemote
+        )
+    }
+
+    pub fn is_idle(&self) -> bool {
+        matches!(self.inner, Idle)
+    }
+
+    pub fn ensure_recv_open(&self) -> Result<bool, proto::Error> {
+        // TODO: Is this correct?
+        match self.inner {
+            Closed(Cause::Error(ref e)) => Err(e.clone()),
+            Closed(Cause::ScheduledLibraryReset(reason)) => {
+                Err(proto::Error::library_go_away(reason))
+            }
+            Closed(Cause::EndStream) | HalfClosedRemote(..) | ReservedLocal => Ok(false),
+            _ => Ok(true),
+        }
+    }
+
+    /// Returns a reason if the stream has been reset.
+    pub(super) fn ensure_reason(&self, mode: PollReset) -> Result<Option<Reason>, crate::Error> {
+        match self.inner {
+            Closed(Cause::Error(Error::Reset(_, reason, _)))
+            | Closed(Cause::Error(Error::GoAway(_, reason, _)))
+            | Closed(Cause::ScheduledLibraryReset(reason)) => Ok(Some(reason)),
+            Closed(Cause::Error(ref e)) => Err(e.clone().into()),
+            Open {
+                local: Streaming, ..
+            }
+            | HalfClosedRemote(Streaming) => match mode {
+                PollReset::AwaitingHeaders => Err(UserError::PollResetAfterSendResponse.into()),
+                PollReset::Streaming => Ok(None),
+            },
+            _ => Ok(None),
+        }
+    }
+}
+
+impl Default for State {
+    fn default() -> State {
+        State { inner: Inner::Idle }
+    }
+}
diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs
new file mode 100644
index 0000000..35fd6f2
--- /dev/null
+++ b/src/proto/streams/store.rs
@@ -0,0 +1,465 @@
+use super::*;
+
+use indexmap::{self, IndexMap};
+
+use std::convert::Infallible;
+use std::fmt;
+use std::marker::PhantomData;
+use std::ops;
+
+/// Storage for streams
+#[derive(Debug)]
+pub(super) struct Store {
+    slab: slab::Slab<Stream>,
+    ids: IndexMap<StreamId, SlabIndex>,
+}
+
+/// "Pointer" to an entry in the store
+pub(super) struct Ptr<'a> {
+    key: Key,
+    store: &'a mut Store,
+}
+
+/// References an entry in the store.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) struct Key {
+    index: SlabIndex,
+    /// Keep the stream ID in the key as an ABA guard, since slab indices
+    /// could be re-used with a new stream.
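+    ///
+    /// For example, if slab slot 3 held stream 5, stream 5 is removed, and
+    /// the slot is later re-used for stream 9, a stale
+    /// `Key { index: 3, stream_id: 5 }` no longer resolves: the `Index` /
+    /// `IndexMut` impls below filter on `stream_id` and panic on such a
+    /// dangling key instead of silently handing back the wrong stream.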
+    stream_id: StreamId,
+}
+
+// We can never have more than `StreamId::MAX` streams in the store,
+// so we can use a smaller index (u32 vs usize).
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+struct SlabIndex(u32);
+
+#[derive(Debug)]
+pub(super) struct Queue<N> {
+    indices: Option<store::Indices>,
+    _p: PhantomData<N>,
+}
+
+pub(super) trait Next {
+    fn next(stream: &Stream) -> Option<Key>;
+
+    fn set_next(stream: &mut Stream, key: Option<Key>);
+
+    fn take_next(stream: &mut Stream) -> Option<Key>;
+
+    fn is_queued(stream: &Stream) -> bool;
+
+    fn set_queued(stream: &mut Stream, val: bool);
+}
+
+/// A linked list
+#[derive(Debug, Clone, Copy)]
+struct Indices {
+    pub head: Key,
+    pub tail: Key,
+}
+
+pub(super) enum Entry<'a> {
+    Occupied(OccupiedEntry<'a>),
+    Vacant(VacantEntry<'a>),
+}
+
+pub(super) struct OccupiedEntry<'a> {
+    ids: indexmap::map::OccupiedEntry<'a, StreamId, SlabIndex>,
+}
+
+pub(super) struct VacantEntry<'a> {
+    ids: indexmap::map::VacantEntry<'a, StreamId, SlabIndex>,
+    slab: &'a mut slab::Slab<Stream>,
+}
+
+pub(super) trait Resolve {
+    fn resolve(&mut self, key: Key) -> Ptr;
+}
+
+// ===== impl Store =====
+
+impl Store {
+    pub fn new() -> Self {
+        Store {
+            slab: slab::Slab::new(),
+            ids: IndexMap::new(),
+        }
+    }
+
+    pub fn find_mut(&mut self, id: &StreamId) -> Option<Ptr> {
+        let index = match self.ids.get(id) {
+            Some(key) => *key,
+            None => return None,
+        };
+
+        Some(Ptr {
+            key: Key {
+                index,
+                stream_id: *id,
+            },
+            store: self,
+        })
+    }
+
+    pub fn insert(&mut self, id: StreamId, val: Stream) -> Ptr {
+        let index = SlabIndex(self.slab.insert(val) as u32);
+        assert!(self.ids.insert(id, index).is_none());
+
+        Ptr {
+            key: Key {
+                index,
+                stream_id: id,
+            },
+            store: self,
+        }
+    }
+
+    pub fn find_entry(&mut self, id: StreamId) -> Entry {
+        use self::indexmap::map::Entry::*;
+
+        match self.ids.entry(id) {
+            Occupied(e) => Entry::Occupied(OccupiedEntry { ids: e }),
+            Vacant(e) => Entry::Vacant(VacantEntry {
+                ids: e,
+                slab: &mut self.slab,
+            }),
+        }
+    }
+
+    #[allow(clippy::blocks_in_conditions)]
+    pub(crate) fn for_each<F>(&mut self, mut f: F)
+    where
+        F: FnMut(Ptr),
+    {
+        match self.try_for_each(|ptr| {
+            f(ptr);
+            Ok::<_, Infallible>(())
+        }) {
+            Ok(()) => (),
+            Err(infallible) => match infallible {},
+        }
+    }
+
+    pub fn try_for_each<F, E>(&mut self, mut f: F) -> Result<(), E>
+    where
+        F: FnMut(Ptr) -> Result<(), E>,
+    {
+        let mut len = self.ids.len();
+        let mut i = 0;
+
+        while i < len {
+            // Get the key by index; this makes the borrow checker happy
+            let (stream_id, index) = {
+                let entry = self.ids.get_index(i).unwrap();
+                (*entry.0, *entry.1)
+            };
+
+            f(Ptr {
+                key: Key { index, stream_id },
+                store: self,
+            })?;
+
+            // TODO: This logic probably could be better...
+            let new_len = self.ids.len();
+
+            if new_len < len {
+                debug_assert!(new_len == len - 1);
+                len -= 1;
+            } else {
+                i += 1;
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl Resolve for Store {
+    fn resolve(&mut self, key: Key) -> Ptr {
+        Ptr { key, store: self }
+    }
+}
+
+impl ops::Index<Key> for Store {
+    type Output = Stream;
+
+    fn index(&self, key: Key) -> &Self::Output {
+        self.slab
+            .get(key.index.0 as usize)
+            .filter(|s| s.id == key.stream_id)
+            .unwrap_or_else(|| {
+                panic!("dangling store key for stream_id={:?}", key.stream_id);
+            })
+    }
+}
+
+impl ops::IndexMut<Key> for Store {
+    fn index_mut(&mut self, key: Key) -> &mut Self::Output {
+        self.slab
+            .get_mut(key.index.0 as usize)
+            .filter(|s| s.id == key.stream_id)
+            .unwrap_or_else(|| {
+                panic!("dangling store key for stream_id={:?}", key.stream_id);
+            })
+    }
+}
+
+impl Store {
+    #[cfg(feature = "unstable")]
+    pub fn num_active_streams(&self) -> usize {
+        self.ids.len()
+    }
+
+    #[cfg(feature = "unstable")]
+    pub fn num_wired_streams(&self) -> usize {
+        self.slab.len()
+    }
+}
+
+// While running h2 unit/integration tests, enable this debug assertion.
+//
+// In practice, we don't need to ensure this. But the integration tests
+// help to make sure we've cleaned up in cases where we could (like, the
+// runtime isn't suddenly dropping the task for unknown reasons).
+#[cfg(feature = "unstable")]
+impl Drop for Store {
+    fn drop(&mut self) {
+        use std::thread;
+
+        if !thread::panicking() {
+            debug_assert!(self.slab.is_empty());
+        }
+    }
+}
+
+// ===== impl Queue =====
+
+impl<N> Queue<N>
+where
+    N: Next,
+{
+    pub fn new() -> Self {
+        Queue {
+            indices: None,
+            _p: PhantomData,
+        }
+    }
+
+    pub fn take(&mut self) -> Self {
+        Queue {
+            indices: self.indices.take(),
+            _p: PhantomData,
+        }
+    }
+
+    /// Queue the stream.
+    ///
+    /// If the stream is already contained by the list, return `false`.
+    pub fn push(&mut self, stream: &mut store::Ptr) -> bool {
+        tracing::trace!("Queue::push_back");
+
+        if N::is_queued(stream) {
+            tracing::trace!(" -> already queued");
+            return false;
+        }
+
+        N::set_queued(stream, true);
+
+        // The next pointer shouldn't be set
+        debug_assert!(N::next(stream).is_none());
+
+        // Queue the stream
+        match self.indices {
+            Some(ref mut idxs) => {
+                tracing::trace!(" -> existing entries");
+
+                // Update the current tail node to point to `stream`
+                let key = stream.key();
+                N::set_next(&mut stream.resolve(idxs.tail), Some(key));
+
+                // Update the tail pointer
+                idxs.tail = stream.key();
+            }
+            None => {
+                tracing::trace!(" -> first entry");
+                self.indices = Some(store::Indices {
+                    head: stream.key(),
+                    tail: stream.key(),
+                });
+            }
+        }
+
+        true
+    }
+
+    /// Queue the stream
+    ///
+    /// If the stream is already contained by the list, return `false`.
+    pub fn push_front(&mut self, stream: &mut store::Ptr) -> bool {
+        tracing::trace!("Queue::push_front");
+
+        if N::is_queued(stream) {
+            tracing::trace!(" -> already queued");
+            return false;
+        }
+
+        N::set_queued(stream, true);
+
+        // The next pointer shouldn't be set
+        debug_assert!(N::next(stream).is_none());
+
+        // Queue the stream
+        match self.indices {
+            Some(ref mut idxs) => {
+                tracing::trace!(" -> existing entries");
+
+                // Update the provided stream to point to the head node
+                let head_key = stream.resolve(idxs.head).key();
+                N::set_next(stream, Some(head_key));
+
+                // Update the head pointer
+                idxs.head = stream.key();
+            }
+            None => {
+                tracing::trace!(" -> first entry");
+                self.indices = Some(store::Indices {
+                    head: stream.key(),
+                    tail: stream.key(),
+                });
+            }
+        }
+
+        true
+    }
+
+    pub fn pop<'a, R>(&mut self, store: &'a mut R) -> Option<store::Ptr<'a>>
+    where
+        R: Resolve,
+    {
+        if let Some(mut idxs) = self.indices {
+            let mut stream = store.resolve(idxs.head);
+
+            if idxs.head == idxs.tail {
+                assert!(N::next(&stream).is_none());
+                self.indices = None;
+            } else {
+                idxs.head = N::take_next(&mut stream).unwrap();
+                self.indices = Some(idxs);
+            }
+
+            debug_assert!(N::is_queued(&stream));
+            N::set_queued(&mut stream, false);
+
+            return Some(stream);
+        }
+
+        None
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.indices.is_none()
+    }
+
+    pub fn pop_if<'a, R, F>(&mut self, store: &'a mut R, f: F) -> Option<store::Ptr<'a>>
+    where
+        R: Resolve,
+        F: Fn(&Stream) -> bool,
+    {
+        if let Some(idxs) = self.indices {
+            let should_pop = f(&store.resolve(idxs.head));
+            if should_pop {
+                return self.pop(store);
+            }
+        }
+
+        None
+    }
+}
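+
+// Note that `Queue` itself only stores head/tail `Key`s; the links live in
+// the per-stream `next_*` fields exposed through the `Next` trait above, so a
+// stream can sit in several of these intrusive lists at once (send, capacity,
+// open, ...) without any extra allocation per queued entry.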
+
+// ===== impl Ptr =====
+
+impl<'a> Ptr<'a> {
+    /// Returns the Key associated with the stream
+    pub fn key(&self) -> Key {
+        self.key
+    }
+
+    pub fn store_mut(&mut self) -> &mut Store {
+        self.store
+    }
+
+    /// Remove the stream from the store
+    pub fn remove(self) -> StreamId {
+        // The stream must have been unlinked before this point
+        debug_assert!(!self.store.ids.contains_key(&self.key.stream_id));
+
+        // Remove the stream state
+        let stream = self.store.slab.remove(self.key.index.0 as usize);
+        assert_eq!(stream.id, self.key.stream_id);
+        stream.id
+    }
+
+    /// Remove the StreamId -> stream state association.
+    ///
+    /// This will effectively remove the stream as far as the H2 protocol is
+    /// concerned.
+    pub fn unlink(&mut self) {
+        let id = self.key.stream_id;
+        self.store.ids.swap_remove(&id);
+    }
+}
+
+impl<'a> Resolve for Ptr<'a> {
+    fn resolve(&mut self, key: Key) -> Ptr {
+        Ptr {
+            key,
+            store: &mut *self.store,
+        }
+    }
+}
+
+impl<'a> ops::Deref for Ptr<'a> {
+    type Target = Stream;
+
+    fn deref(&self) -> &Stream {
+        &self.store[self.key]
+    }
+}
+
+impl<'a> ops::DerefMut for Ptr<'a> {
+    fn deref_mut(&mut self) -> &mut Stream {
+        &mut self.store[self.key]
+    }
+}
+
+impl<'a> fmt::Debug for Ptr<'a> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(fmt)
+    }
+}
+
+// ===== impl OccupiedEntry =====
+
+impl<'a> OccupiedEntry<'a> {
+    pub fn key(&self) -> Key {
+        let stream_id = *self.ids.key();
+        let index = *self.ids.get();
+        Key { index, stream_id }
+    }
+}
+
+// ===== impl VacantEntry =====
+
+impl<'a> VacantEntry<'a> {
+    pub fn insert(self, value: Stream) -> Key {
+        // Insert the value in the slab
+        let stream_id = value.id;
+        let index = SlabIndex(self.slab.insert(value) as u32);
+
+        // Insert the handle in the ID map
+        self.ids.insert(index);
+
+        Key { index, stream_id }
+    }
+}
diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs
new file mode 100644
index 0000000..43e3136
--- /dev/null
+++ b/src/proto/streams/stream.rs
@@ -0,0 +1,527 @@
+use super::*;
+
+use std::task::{Context, Waker};
+use std::time::Instant;
+use std::usize;
+
+/// Tracks Stream related state
+///
+/// # Reference counting
+///
+/// There can be a number of outstanding handles to a single Stream. These are
+/// tracked using reference counting. The `ref_count` field represents the
+/// number of outstanding userspace handles that can reach this stream.
+///
+/// It's important to note that when the stream is placed in an internal queue
+/// (such as an accept queue), this is **not** tracked by a reference count.
+/// Thus, `ref_count` can be zero and the stream still has to be kept around.
+#[derive(Debug)]
+pub(super) struct Stream {
+    /// The h2 stream identifier
+    pub id: StreamId,
+
+    /// Current state of the stream
+    pub state: State,
+
+    /// Set to `true` when the stream is counted against the connection's max
+    /// concurrent streams.
+    pub is_counted: bool,
+
+    /// Number of outstanding handles pointing to this stream
+    pub ref_count: usize,
+
+    // ===== Fields related to sending =====
+    /// Next node in the pending send linked list
+    pub next_pending_send: Option<store::Key>,
+
+    /// Set to true when the stream is pending send
+    pub is_pending_send: bool,
+
+    /// Send data flow control
+    pub send_flow: FlowControl,
+
+    /// Amount of send capacity that has been requested, but not yet allocated.
+    pub requested_send_capacity: WindowSize,
+
+    /// Amount of data buffered at the prioritization layer.
+    /// TODO: Technically this could be greater than the window size...
+    pub buffered_send_data: usize,
+
+    /// Task tracking additional send capacity (i.e. window updates).
+    send_task: Option<Waker>,
+
+    /// Frames pending for this stream being sent to the socket
+    pub pending_send: buffer::Deque,
+
+    /// Next node in the linked list of streams waiting for additional
+    /// connection level capacity.
+    pub next_pending_send_capacity: Option<store::Key>,
+
+    /// True if the stream is waiting for outbound connection capacity
+    pub is_pending_send_capacity: bool,
+
+    /// Set to true when the send capacity has been incremented
+    pub send_capacity_inc: bool,
+
+    /// Next node in the open linked list
+    pub next_open: Option<store::Key>,
+
+    /// Set to true when the stream is pending to be opened
+    pub is_pending_open: bool,
+
+    /// Set to true when a push is pending for this stream
+    pub is_pending_push: bool,
+
+    // ===== Fields related to receiving =====
+    /// Next node in the accept linked list
+    pub next_pending_accept: Option<store::Key>,
+
+    /// Set to true when the stream is pending accept
+    pub is_pending_accept: bool,
+
+    /// Receive data flow control
+    pub recv_flow: FlowControl,
+
+    pub in_flight_recv_data: WindowSize,
+
+    /// Next node in the linked list of streams waiting to send window updates.
+    pub next_window_update: Option<store::Key>,
+
+    /// True if the stream is waiting to send a window update
+    pub is_pending_window_update: bool,
+
+    /// The time when this stream may have been locally reset.
+    pub reset_at: Option<Instant>,
+
+    /// Next node in list of reset streams that should expire eventually
+    pub next_reset_expire: Option<store::Key>,
+
+    /// Frames pending for this stream to read
+    pub pending_recv: buffer::Deque,
+
+    /// Once the `RecvStream` has been dropped, no more data should be received.
+    pub is_recv: bool,
+
+    /// Task tracking receiving frames
+    pub recv_task: Option<Waker>,
+
+    /// The stream's pending push promises
+    pub pending_push_promises: store::Queue<NextAccept>,
+
+    /// Validate content-length headers
+    pub content_length: ContentLength,
+}
+
+/// State related to validating a stream's content-length
+#[derive(Debug)]
+pub enum ContentLength {
+    Omitted,
+    Head,
+    Remaining(u64),
+}
+
+#[derive(Debug)]
+pub(super) struct NextAccept;
+
+#[derive(Debug)]
+pub(super) struct NextSend;
+
+#[derive(Debug)]
+pub(super) struct NextSendCapacity;
+
+#[derive(Debug)]
+pub(super) struct NextWindowUpdate;
+
+#[derive(Debug)]
+pub(super) struct NextOpen;
+
+#[derive(Debug)]
+pub(super) struct NextResetExpire;
+
+impl Stream {
+    pub fn new(id: StreamId, init_send_window: WindowSize, init_recv_window: WindowSize) -> Stream {
+        let mut send_flow = FlowControl::new();
+        let mut recv_flow = FlowControl::new();
+
+        recv_flow
+            .inc_window(init_recv_window)
+            .expect("invalid initial receive window");
+        // TODO: proper error handling?
+        let _res = recv_flow.assign_capacity(init_recv_window);
+        debug_assert!(_res.is_ok());
+
+        send_flow
+            .inc_window(init_send_window)
+            .expect("invalid initial send window size");
+
+        Stream {
+            id,
+            state: State::default(),
+            ref_count: 0,
+            is_counted: false,
+
+            // ===== Fields related to sending =====
+            next_pending_send: None,
+            is_pending_send: false,
+            send_flow,
+            requested_send_capacity: 0,
+            buffered_send_data: 0,
+            send_task: None,
+            pending_send: buffer::Deque::new(),
+            is_pending_send_capacity: false,
+            next_pending_send_capacity: None,
+            send_capacity_inc: false,
+            is_pending_open: false,
+            next_open: None,
+            is_pending_push: false,
+
+            // ===== Fields related to receiving =====
+            next_pending_accept: None,
+            is_pending_accept: false,
+            recv_flow,
+            in_flight_recv_data: 0,
+            next_window_update: None,
+            is_pending_window_update: false,
+            reset_at: None,
+            next_reset_expire: None,
+            pending_recv: buffer::Deque::new(),
+            is_recv: true,
+            recv_task: None,
+            pending_push_promises: store::Queue::new(),
+            content_length: ContentLength::Omitted,
+        }
+    }
+
+    /// Increment the stream's ref count
+    pub fn ref_inc(&mut self) {
+        assert!(self.ref_count < usize::MAX);
+        self.ref_count += 1;
+    }
+
+    /// Decrements the stream's ref count
+    pub fn ref_dec(&mut self) {
+        assert!(self.ref_count > 0);
+        self.ref_count -= 1;
+    }
+
+    /// Returns true if the stream is currently being held for some time because of
+    /// a local reset.
+    pub fn is_pending_reset_expiration(&self) -> bool {
+        self.reset_at.is_some()
+    }
+
+    /// Returns true if frames for this stream are ready to be sent over the wire
+    pub fn is_send_ready(&self) -> bool {
+        // Why do we check pending_open?
+        //
+        // We allow users to call send_request() which schedules a stream to be pending_open
+        // if there is no room according to the concurrency limit (max_send_streams), and we
+        // also allow data to be buffered for send with send_data() if there is no capacity for
+        // the stream to send the data, which attempts to place the stream in pending_send.
+        // If the stream is not open, we don't want the stream to be scheduled for
+        // execution (pending_send). Note that if the stream is in pending_open, it will be
+        // pushed to pending_send when there is room for an open stream.
+        //
+        // In pending_push we track whether a PushPromise still needs to be sent
+        // from a different stream before we can start sending frames on this one.
+        // This is different from the "open" check because reserved streams don't count
+        // toward the concurrency limit.
+        // See https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2
+        !self.is_pending_open && !self.is_pending_push
+    }
+
+    /// Returns true if the stream is closed
+    pub fn is_closed(&self) -> bool {
+        // The state has fully transitioned to closed.
+        self.state.is_closed() &&
+            // Because outbound frames transition the stream state before being
+            // buffered, we have to ensure that all frames have been flushed.
+            self.pending_send.is_empty() &&
+            // Sometimes large data frames are sent out in chunks. After a chunk
+            // of the frame is sent, the remainder is pushed back onto the send
+            // queue to be rescheduled.
+            //
+            // Checking for additional buffered data lets us catch this case.
+            self.buffered_send_data == 0
+    }
+
+    /// Returns true if the stream is no longer in use
+    pub fn is_released(&self) -> bool {
+        // The stream is closed and fully flushed
+        self.is_closed() &&
+            // There are no more outstanding references to the stream
+            self.ref_count == 0 &&
+            // The stream is not in any queue
+            !self.is_pending_send && !self.is_pending_send_capacity &&
+            !self.is_pending_accept && !self.is_pending_window_update &&
+            !self.is_pending_open && self.reset_at.is_none()
+    }
+
+    /// Returns true when the consumer of the stream has dropped all handles
+    /// (indicating no further interest in the stream) and the stream state is
+    /// not actually closed.
+    ///
+    /// In this case, a reset should be sent.
+    pub fn is_canceled_interest(&self) -> bool {
+        self.ref_count == 0 && !self.state.is_closed()
+    }
+
+    /// Current available stream send capacity
+    pub fn capacity(&self, max_buffer_size: usize) -> WindowSize {
+        let available = self.send_flow.available().as_size() as usize;
+        let buffered = self.buffered_send_data;
+
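+        // For example: with available = 100, max_buffer_size = 64 and
+        // buffered = 40, the reported capacity is min(100, 64) - 40 = 24;
+        // the saturating_sub clamps the result to 0 when more data is
+        // buffered than the clamped window allows.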
+        available.min(max_buffer_size).saturating_sub(buffered) as WindowSize
+    }
+
+    pub fn assign_capacity(&mut self, capacity: WindowSize, max_buffer_size: usize) {
+        let prev_capacity = self.capacity(max_buffer_size);
+        debug_assert!(capacity > 0);
+        // TODO: proper error handling
+        let _res = self.send_flow.assign_capacity(capacity);
+        debug_assert!(_res.is_ok());
+
+        tracing::trace!(
+            "  assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}",
+            self.send_flow.available(),
+            self.buffered_send_data,
+            self.id,
+            max_buffer_size,
+            prev_capacity,
+        );
+
+        if prev_capacity < self.capacity(max_buffer_size) {
+            self.notify_capacity();
+        }
+    }
+
+    pub fn send_data(&mut self, len: WindowSize, max_buffer_size: usize) {
+        let prev_capacity = self.capacity(max_buffer_size);
+
+        // TODO: proper error handling
+        let _res = self.send_flow.send_data(len);
+        debug_assert!(_res.is_ok());
+
+        // Decrement the stream's buffered data counter
+        debug_assert!(self.buffered_send_data >= len as usize);
+        self.buffered_send_data -= len as usize;
+        self.requested_send_capacity -= len;
+
+        tracing::trace!(
+            "  sent stream data; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}",
+            self.send_flow.available(),
+            self.buffered_send_data,
+            self.id,
+            max_buffer_size,
+            prev_capacity,
+        );
+
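+        // Flushing buffered bytes can *increase* the reported capacity even
+        // though flow-control window was just consumed: buffered_send_data
+        // shrank, which may raise the max_buffer_size-clamped value returned
+        // by capacity(), hence the wake-up check below.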
+        if prev_capacity < self.capacity(max_buffer_size) {
+            self.notify_capacity();
+        }
+    }
+
+    /// If the capacity was limited because of the max_send_buffer_size,
+    /// then consider waking the send task again...
+    pub fn notify_capacity(&mut self) {
+        self.send_capacity_inc = true;
+        tracing::trace!("  notifying task");
+        self.notify_send();
+    }
+
+    /// Returns `Err` when the decrement cannot be completed due to overflow.
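+    ///
+    /// For example, a message advertising `content-length: 10` starts as
+    /// `Remaining(10)`; a 4-byte DATA frame leaves `Remaining(6)`, and a
+    /// frame larger than the remainder makes the checked_sub fail, returning
+    /// `Err(())` so the caller can treat the mismatch as an error.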
+    pub fn dec_content_length(&mut self, len: usize) -> Result<(), ()> {
+        match self.content_length {
+            ContentLength::Remaining(ref mut rem) => match rem.checked_sub(len as u64) {
+                Some(val) => *rem = val,
+                None => return Err(()),
+            },
+            ContentLength::Head => {
+                if len != 0 {
+                    return Err(());
+                }
+            }
+            _ => {}
+        }
+
+        Ok(())
+    }
+
+    pub fn ensure_content_length_zero(&self) -> Result<(), ()> {
+        match self.content_length {
+            ContentLength::Remaining(0) => Ok(()),
+            ContentLength::Remaining(_) => Err(()),
+            _ => Ok(()),
+        }
+    }
+
+    pub fn notify_send(&mut self) {
+        if let Some(task) = self.send_task.take() {
+            task.wake();
+        }
+    }
+
+    pub fn wait_send(&mut self, cx: &Context) {
+        self.send_task = Some(cx.waker().clone());
+    }
+
+    pub fn notify_recv(&mut self) {
+        if let Some(task) = self.recv_task.take() {
+            task.wake();
+        }
+    }
+}
+
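+// Each `Next*` marker type below implements `store::Next`, giving
+// `store::Queue` an intrusive linked list threaded through the streams
+// themselves: the per-stream `next_*` field holds the link to the next queued
+// stream, and the matching `is_pending_*` flag (or `reset_at` for reset
+// expiration) records whether the stream is currently queued. This lets one
+// `Stream` sit in several queues (accept, send, send capacity, window update,
+// open, reset expiration) at once without separate allocations per queue entry.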
+impl store::Next for NextAccept {
+    fn next(stream: &Stream) -> Option<store::Key> {
+        stream.next_pending_accept
+    }
+
+    fn set_next(stream: &mut Stream, key: Option<store::Key>) {
+        stream.next_pending_accept = key;
+    }
+
+    fn take_next(stream: &mut Stream) -> Option<store::Key> {
+        stream.next_pending_accept.take()
+    }
+
+    fn is_queued(stream: &Stream) -> bool {
+        stream.is_pending_accept
+    }
+
+    fn set_queued(stream: &mut Stream, val: bool) {
+        stream.is_pending_accept = val;
+    }
+}
+
+impl store::Next for NextSend {
+    fn next(stream: &Stream) -> Option<store::Key> {
+        stream.next_pending_send
+    }
+
+    fn set_next(stream: &mut Stream, key: Option<store::Key>) {
+        stream.next_pending_send = key;
+    }
+
+    fn take_next(stream: &mut Stream) -> Option<store::Key> {
+        stream.next_pending_send.take()
+    }
+
+    fn is_queued(stream: &Stream) -> bool {
+        stream.is_pending_send
+    }
+
+    fn set_queued(stream: &mut Stream, val: bool) {
+        if val {
+            // ensure that stream is not queued for being opened
+            // if it's being put into queue for sending data
+            debug_assert!(!stream.is_pending_open);
+        }
+        stream.is_pending_send = val;
+    }
+}
+
+impl store::Next for NextSendCapacity {
+    fn next(stream: &Stream) -> Option<store::Key> {
+        stream.next_pending_send_capacity
+    }
+
+    fn set_next(stream: &mut Stream, key: Option<store::Key>) {
+        stream.next_pending_send_capacity = key;
+    }
+
+    fn take_next(stream: &mut Stream) -> Option<store::Key> {
+        stream.next_pending_send_capacity.take()
+    }
+
+    fn is_queued(stream: &Stream) -> bool {
+        stream.is_pending_send_capacity
+    }
+
+    fn set_queued(stream: &mut Stream, val: bool) {
+        stream.is_pending_send_capacity = val;
+    }
+}
+
+impl store::Next for NextWindowUpdate {
+    fn next(stream: &Stream) -> Option<store::Key> {
+        stream.next_window_update
+    }
+
+    fn set_next(stream: &mut Stream, key: Option<store::Key>) {
+        stream.next_window_update = key;
+    }
+
+    fn take_next(stream: &mut Stream) -> Option<store::Key> {
+        stream.next_window_update.take()
+    }
+
+    fn is_queued(stream: &Stream) -> bool {
+        stream.is_pending_window_update
+    }
+
+    fn set_queued(stream: &mut Stream, val: bool) {
+        stream.is_pending_window_update = val;
+    }
+}
+
+impl store::Next for NextOpen {
+    fn next(stream: &Stream) -> Option<store::Key> {
+        stream.next_open
+    }
+
+    fn set_next(stream: &mut Stream, key: Option<store::Key>) {
+        stream.next_open = key;
+    }
+
+    fn take_next(stream: &mut Stream) -> Option<store::Key> {
+        stream.next_open.take()
+    }
+
+    fn is_queued(stream: &Stream) -> bool {
+        stream.is_pending_open
+    }
+
+    fn set_queued(stream: &mut Stream, val: bool) {
+        if val {
+            // ensure that stream is not queued for being sent
+            // if it's being put into queue for opening the stream
+            debug_assert!(!stream.is_pending_send);
+        }
+        stream.is_pending_open = val;
+    }
+}
+
+impl store::Next for NextResetExpire {
+    fn next(stream: &Stream) -> Option<store::Key> {
+        stream.next_reset_expire
+    }
+
+    fn set_next(stream: &mut Stream, key: Option<store::Key>) {
+        stream.next_reset_expire = key;
+    }
+
+    fn take_next(stream: &mut Stream) -> Option<store::Key> {
+        stream.next_reset_expire.take()
+    }
+
+    fn is_queued(stream: &Stream) -> bool {
+        stream.reset_at.is_some()
+    }
+
+    fn set_queued(stream: &mut Stream, val: bool) {
+        if val {
+            stream.reset_at = Some(Instant::now());
+        } else {
+            stream.reset_at = None;
+        }
+    }
+}
+
+// ===== impl ContentLength =====
+
+impl ContentLength {
+    pub fn is_head(&self) -> bool {
+        matches!(*self, Self::Head)
+    }
+}
diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs
new file mode 100644
index 0000000..fa8e684
--- /dev/null
+++ b/src/proto/streams/streams.rs
@@ -0,0 +1,1625 @@
+use super::recv::RecvHeaderBlockError;
+use super::store::{self, Entry, Resolve, Store};
+use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
+use crate::codec::{Codec, SendError, UserError};
+use crate::ext::Protocol;
+use crate::frame::{self, Frame, Reason};
+use crate::proto::{peer, Error, Initiator, Open, Peer, WindowSize};
+use crate::{client, proto, server};
+
+use bytes::{Buf, Bytes};
+use http::{HeaderMap, Request, Response};
+use std::task::{Context, Poll, Waker};
+use tokio::io::AsyncWrite;
+
+use std::sync::{Arc, Mutex};
+use std::{fmt, io};
+
+#[derive(Debug)]
+pub(crate) struct Streams<B, P>
+where
+    P: Peer,
+{
+    /// Holds most of the connection and stream related state for processing
+    /// HTTP/2 frames associated with streams.
+    inner: Arc<Mutex<Inner>>,
+
+    /// This is the queue of frames to be written to the wire. This is split out
+    /// to avoid requiring a `B` generic on all public API types even if `B` is
+    /// not technically required.
+    ///
+    /// Currently, splitting this out requires a second `Arc` + `Mutex`.
+    /// However, it should be possible to avoid this duplication with a little
+    /// bit of unsafe code. This optimization has been postponed until it has
+    /// been shown to be necessary.
+    send_buffer: Arc<SendBuffer<B>>,
+
+    _p: ::std::marker::PhantomData<P>,
+}
+
+// Like `Streams`, but with a `peer::Dyn` field instead of a static `P: Peer`
+// type parameter. This ensures that the methods get only one instantiation,
+// instead of two (client and server).
+#[derive(Debug)]
+pub(crate) struct DynStreams<'a, B> {
+    inner: &'a Mutex<Inner>,
+
+    send_buffer: &'a SendBuffer<B>,
+
+    peer: peer::Dyn,
+}
+
+/// Reference to the stream state
+#[derive(Debug)]
+pub(crate) struct StreamRef<B> {
+    opaque: OpaqueStreamRef,
+    send_buffer: Arc<SendBuffer<B>>,
+}
+
+/// Reference to the stream state that hides the send data chunk generic
+pub(crate) struct OpaqueStreamRef {
+    inner: Arc<Mutex<Inner>>,
+    key: store::Key,
+}
+
+/// Fields needed to manage state for the set of streams. This is mostly
+/// split out to make ownership happy.
+///
+/// TODO: better name
+#[derive(Debug)]
+struct Inner {
+    /// Tracks send & recv stream concurrency.
+    counts: Counts,
+
+    /// Connection level state and performs actions on streams
+    actions: Actions,
+
+    /// Stores stream state
+    store: Store,
+
+    /// The number of stream refs to this shared state.
+    refs: usize,
+}
+
+#[derive(Debug)]
+struct Actions {
+    /// Manages state transitions initiated by receiving frames
+    recv: Recv,
+
+    /// Manages state transitions initiated by sending frames
+    send: Send,
+
+    /// Task that calls `poll_complete`.
+    task: Option<Waker>,
+
+    /// If the connection errors, a copy is kept for any StreamRefs.
+    conn_error: Option<proto::Error>,
+}
+
+/// Contains the buffer of frames to be written to the wire.
+#[derive(Debug)]
+struct SendBuffer<B> {
+    inner: Mutex<Buffer<Frame<B>>>,
+}
+
+// ===== impl Streams =====
+
+impl<B, P> Streams<B, P>
+where
+    B: Buf,
+    P: Peer,
+{
+    pub fn new(config: Config) -> Self {
+        let peer = P::r#dyn();
+
+        Streams {
+            inner: Inner::new(peer, config),
+            send_buffer: Arc::new(SendBuffer::new()),
+            _p: ::std::marker::PhantomData,
+        }
+    }
+
+    pub fn set_target_connection_window_size(&mut self, size: WindowSize) -> Result<(), Reason> {
+        let mut me = self.inner.lock().unwrap();
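+        // Reborrow the guard as `&mut Inner` so its fields can be borrowed
+        // disjointly below (a pattern repeated throughout this file).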
+        let me = &mut *me;
+
+        me.actions
+            .recv
+            .set_target_connection_window(size, &mut me.actions.task)
+    }
+
+    pub fn next_incoming(&mut self) -> Option<StreamRef<B>> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+        me.actions.recv.next_incoming(&mut me.store).map(|key| {
+            let stream = &mut me.store.resolve(key);
+            tracing::trace!(
+                "next_incoming; id={:?}, state={:?}",
+                stream.id,
+                stream.state
+            );
+            // TODO: ideally, OpaqueStreamRef::new would do this, but we're holding
+            // the lock, so it can't.
+            me.refs += 1;
+
+            // Pending-accepted remotely-reset streams are counted.
+            if stream.state.is_remote_reset() {
+                me.counts.dec_num_remote_reset_streams();
+            }
+
+            StreamRef {
+                opaque: OpaqueStreamRef::new(self.inner.clone(), stream),
+                send_buffer: self.send_buffer.clone(),
+            }
+        })
+    }
+
+    pub fn send_pending_refusal<T>(
+        &mut self,
+        cx: &mut Context,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+    {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+        me.actions.recv.send_pending_refusal(cx, dst)
+    }
+
+    pub fn clear_expired_reset_streams(&mut self) {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+        me.actions
+            .recv
+            .clear_expired_reset_streams(&mut me.store, &mut me.counts);
+    }
+
+    pub fn poll_complete<T>(
+        &mut self,
+        cx: &mut Context,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+    {
+        let mut me = self.inner.lock().unwrap();
+        me.poll_complete(&self.send_buffer, cx, dst)
+    }
+
+    pub fn apply_remote_settings(
+        &mut self,
+        frame: &frame::Settings,
+        is_initial: bool,
+    ) -> Result<(), Error> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        me.counts.apply_remote_settings(frame, is_initial);
+
+        me.actions.send.apply_remote_settings(
+            frame,
+            send_buffer,
+            &mut me.store,
+            &mut me.counts,
+            &mut me.actions.task,
+        )
+    }
+
+    pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        me.actions.recv.apply_local_settings(frame, &mut me.store)
+    }
+
+    pub fn send_request(
+        &mut self,
+        mut request: Request<()>,
+        end_of_stream: bool,
+        pending: Option<&OpaqueStreamRef>,
+    ) -> Result<(StreamRef<B>, bool), SendError> {
+        use super::stream::ContentLength;
+        use http::Method;
+
+        let protocol = request.extensions_mut().remove::<Protocol>();
+
+        // Clear before taking the lock, in case extensions contain a StreamRef.
+        request.extensions_mut().clear();
+
+        // TODO: There is a hazard with assigning a stream ID before the
+        // prioritize layer. If prioritization reorders new streams, this
+        // implicitly closes the earlier stream IDs.
+        //
+        // See: hyperium/h2#11
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        me.actions.ensure_no_conn_error()?;
+        me.actions.send.ensure_next_stream_id()?;
+
+        // The `pending` argument is provided by the `Client`, and holds
+        // a store `Key` of a `Stream` that may not have been opened
+        // yet.
+        //
+        // If that stream is still pending, the Client isn't allowed to
+        // queue up another pending stream. It should use `poll_ready`.
+        if let Some(stream) = pending {
+            if me.store.resolve(stream.key).is_pending_open {
+                return Err(UserError::Rejected.into());
+            }
+        }
+
+        if me.counts.peer().is_server() {
+            // Servers cannot open streams. PushPromise must first be reserved.
+            return Err(UserError::UnexpectedFrameType.into());
+        }
+
+        let stream_id = me.actions.send.open()?;
+
+        let mut stream = Stream::new(
+            stream_id,
+            me.actions.send.init_window_sz(),
+            me.actions.recv.init_window_sz(),
+        );
+
+        if *request.method() == Method::HEAD {
+            stream.content_length = ContentLength::Head;
+        }
+
+        // Convert the message
+        let headers =
+            client::Peer::convert_send_message(stream_id, request, protocol, end_of_stream)?;
+
+        let mut stream = me.store.insert(stream.id, stream);
+
+        let sent = me.actions.send.send_headers(
+            headers,
+            send_buffer,
+            &mut stream,
+            &mut me.counts,
+            &mut me.actions.task,
+        );
+
+        // send_headers can return a UserError, if it does,
+        // we should forget about this stream.
+        if let Err(err) = sent {
+            stream.unlink();
+            stream.remove();
+            return Err(err.into());
+        }
+
+        // Given that the stream has been initialized, it should not be in the
+        // closed state.
+        debug_assert!(!stream.state.is_closed());
+
+        // TODO: ideally, OpaqueStreamRef::new would do this, but we're holding
+        // the lock, so it can't.
+        me.refs += 1;
+
+        let is_full = me.counts.next_send_stream_will_reach_capacity();
+        Ok((
+            StreamRef {
+                opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream),
+                send_buffer: self.send_buffer.clone(),
+            },
+            is_full,
+        ))
+    }
+
+    pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
+        self.inner
+            .lock()
+            .unwrap()
+            .actions
+            .send
+            .is_extended_connect_protocol_enabled()
+    }
+}
+
+impl<B> DynStreams<'_, B> {
+    pub fn is_buffer_empty(&self) -> bool {
+        self.send_buffer.is_empty()
+    }
+
+    pub fn is_server(&self) -> bool {
+        self.peer.is_server()
+    }
+
+    pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), Error> {
+        let mut me = self.inner.lock().unwrap();
+
+        me.recv_headers(self.peer, self.send_buffer, frame)
+    }
+
+    pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), Error> {
+        let mut me = self.inner.lock().unwrap();
+        me.recv_data(self.peer, self.send_buffer, frame)
+    }
+
+    pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), Error> {
+        let mut me = self.inner.lock().unwrap();
+
+        me.recv_reset(self.send_buffer, frame)
+    }
+
+    /// Notify all streams that a connection-level error happened.
+    pub fn handle_error(&mut self, err: proto::Error) -> StreamId {
+        let mut me = self.inner.lock().unwrap();
+        me.handle_error(self.send_buffer, err)
+    }
+
+    pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), Error> {
+        let mut me = self.inner.lock().unwrap();
+        me.recv_go_away(self.send_buffer, frame)
+    }
+
+    pub fn last_processed_id(&self) -> StreamId {
+        self.inner.lock().unwrap().actions.recv.last_processed_id()
+    }
+
+    pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), Error> {
+        let mut me = self.inner.lock().unwrap();
+        me.recv_window_update(self.send_buffer, frame)
+    }
+
+    pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), Error> {
+        let mut me = self.inner.lock().unwrap();
+        me.recv_push_promise(self.send_buffer, frame)
+    }
+
+    pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> {
+        let mut me = self.inner.lock().map_err(|_| ())?;
+        me.recv_eof(self.send_buffer, clear_pending_accept)
+    }
+
+    pub fn send_reset(&mut self, id: StreamId, reason: Reason) {
+        let mut me = self.inner.lock().unwrap();
+        me.send_reset(self.send_buffer, id, reason)
+    }
+
+    pub fn send_go_away(&mut self, last_processed_id: StreamId) {
+        let mut me = self.inner.lock().unwrap();
+        me.actions.recv.go_away(last_processed_id);
+    }
+}
+
+impl Inner {
+    fn new(peer: peer::Dyn, config: Config) -> Arc<Mutex<Self>> {
+        Arc::new(Mutex::new(Inner {
+            counts: Counts::new(peer, &config),
+            actions: Actions {
+                recv: Recv::new(peer, &config),
+                send: Send::new(&config),
+                task: None,
+                conn_error: None,
+            },
+            store: Store::new(),
+            refs: 1,
+        }))
+    }
+
+    fn recv_headers<B>(
+        &mut self,
+        peer: peer::Dyn,
+        send_buffer: &SendBuffer<B>,
+        frame: frame::Headers,
+    ) -> Result<(), Error> {
+        let id = frame.stream_id();
+
+        // The GOAWAY process has begun. All streams with a greater ID than
+        // specified as part of GOAWAY should be ignored.
+        if id > self.actions.recv.max_stream_id() {
+            tracing::trace!(
+                "id ({:?}) > max_stream_id ({:?}), ignoring HEADERS",
+                id,
+                self.actions.recv.max_stream_id()
+            );
+            return Ok(());
+        }
+
+        let key = match self.store.find_entry(id) {
+            Entry::Occupied(e) => e.key(),
+            Entry::Vacant(e) => {
+                // Client: it's possible to send a request, and then send
+                // a RST_STREAM while the response HEADERS were in transit.
+                //
+                // Server: we can't reset a stream before having received
+                // the request headers, so don't allow.
+                if !peer.is_server() {
+                    // This may be response headers for a stream we've already
+                    // forgotten about...
+                    if self.actions.may_have_forgotten_stream(peer, id) {
+                        tracing::debug!(
+                            "recv_headers for old stream={:?}, sending STREAM_CLOSED",
+                            id,
+                        );
+                        return Err(Error::library_reset(id, Reason::STREAM_CLOSED));
+                    }
+                }
+
+                match self
+                    .actions
+                    .recv
+                    .open(id, Open::Headers, &mut self.counts)?
+                {
+                    Some(stream_id) => {
+                        let stream = Stream::new(
+                            stream_id,
+                            self.actions.send.init_window_sz(),
+                            self.actions.recv.init_window_sz(),
+                        );
+
+                        e.insert(stream)
+                    }
+                    None => return Ok(()),
+                }
+            }
+        };
+
+        let stream = self.store.resolve(key);
+
+        if stream.state.is_local_error() {
+            // Locally reset streams must ignore frames "for some time".
+            // This is because the remote may have sent trailers before
+            // receiving the RST_STREAM frame.
+            tracing::trace!("recv_headers; ignoring trailers on {:?}", stream.id);
+            return Ok(());
+        }
+
+        let actions = &mut self.actions;
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        self.counts.transition(stream, |counts, stream| {
+            tracing::trace!(
+                "recv_headers; stream={:?}; state={:?}",
+                stream.id,
+                stream.state
+            );
+
+            let res = if stream.state.is_recv_headers() {
+                match actions.recv.recv_headers(frame, stream, counts) {
+                    Ok(()) => Ok(()),
+                    Err(RecvHeaderBlockError::Oversize(resp)) => {
+                        if let Some(resp) = resp {
+                            let sent = actions.send.send_headers(
+                                resp, send_buffer, stream, counts, &mut actions.task);
+                            debug_assert!(sent.is_ok(), "oversize response should not fail");
+
+                            actions.send.schedule_implicit_reset(
+                                stream,
+                                Reason::REFUSED_STREAM,
+                                counts,
+                                &mut actions.task);
+
+                            actions.recv.enqueue_reset_expiration(stream, counts);
+
+                            Ok(())
+                        } else {
+                            Err(Error::library_reset(stream.id, Reason::REFUSED_STREAM))
+                        }
+                    },
+                    Err(RecvHeaderBlockError::State(err)) => Err(err),
+                }
+            } else {
+                if !frame.is_end_stream() {
+                    // Receiving trailers that don't set EOS is a "malformed"
+                    // message. Malformed messages are a stream error.
+                    proto_err!(stream: "recv_headers: trailers frame was not EOS; stream={:?}", stream.id);
+                    return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR));
+                }
+
+                actions.recv.recv_trailers(frame, stream)
+            };
+
+            actions.reset_on_recv_stream_err(send_buffer, stream, counts, res)
+        })
+    }
+
+    fn recv_data<B>(
+        &mut self,
+        peer: peer::Dyn,
+        send_buffer: &SendBuffer<B>,
+        frame: frame::Data,
+    ) -> Result<(), Error> {
+        let id = frame.stream_id();
+
+        let stream = match self.store.find_mut(&id) {
+            Some(stream) => stream,
+            None => {
+                // The GOAWAY process has begun. All streams with a greater ID
+                // than specified as part of GOAWAY should be ignored.
+                if id > self.actions.recv.max_stream_id() {
+                    tracing::trace!(
+                        "id ({:?}) > max_stream_id ({:?}), ignoring DATA",
+                        id,
+                        self.actions.recv.max_stream_id()
+                    );
+                    return Ok(());
+                }
+
+                if self.actions.may_have_forgotten_stream(peer, id) {
+                    tracing::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,);
+
+                    let sz = frame.payload().len();
+                    // This should have been enforced at the codec::FramedRead layer, so
+                    // this is just a sanity check.
+                    assert!(sz <= super::MAX_WINDOW_SIZE as usize);
+                    let sz = sz as WindowSize;
+
+                    self.actions.recv.ignore_data(sz)?;
+                    return Err(Error::library_reset(id, Reason::STREAM_CLOSED));
+                }
+
+                proto_err!(conn: "recv_data: stream not found; id={:?}", id);
+                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+            }
+        };
+
+        let actions = &mut self.actions;
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        self.counts.transition(stream, |counts, stream| {
+            let sz = frame.payload().len();
+            let res = actions.recv.recv_data(frame, stream);
+
+            // Any stream error after receiving a DATA frame means
+            // we won't give the data to the user, and so they can't
+            // release the capacity. We do it automatically.
+            if let Err(Error::Reset(..)) = res {
+                actions
+                    .recv
+                    .release_connection_capacity(sz as WindowSize, &mut None);
+            }
+            actions.reset_on_recv_stream_err(send_buffer, stream, counts, res)
+        })
+    }
+
+    fn recv_reset<B>(
+        &mut self,
+        send_buffer: &SendBuffer<B>,
+        frame: frame::Reset,
+    ) -> Result<(), Error> {
+        let id = frame.stream_id();
+
+        if id.is_zero() {
+            proto_err!(conn: "recv_reset: invalid stream ID 0");
+            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+        }
+
+        // The GOAWAY process has begun. All streams with a greater ID than
+        // specified as part of GOAWAY should be ignored.
+        if id > self.actions.recv.max_stream_id() {
+            tracing::trace!(
+                "id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM",
+                id,
+                self.actions.recv.max_stream_id()
+            );
+            return Ok(());
+        }
+
+        let stream = match self.store.find_mut(&id) {
+            Some(stream) => stream,
+            None => {
+                // TODO: Are there other error cases?
+                self.actions
+                    .ensure_not_idle(self.counts.peer(), id)
+                    .map_err(Error::library_go_away)?;
+
+                return Ok(());
+            }
+        };
+
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        let actions = &mut self.actions;
+
+        self.counts.transition(stream, |counts, stream| {
+            actions.recv.recv_reset(frame, stream, counts)?;
+            actions.send.handle_error(send_buffer, stream, counts);
+            assert!(stream.state.is_closed());
+            Ok(())
+        })
+    }
+
+    fn recv_window_update<B>(
+        &mut self,
+        send_buffer: &SendBuffer<B>,
+        frame: frame::WindowUpdate,
+    ) -> Result<(), Error> {
+        let id = frame.stream_id();
+
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        if id.is_zero() {
+            self.actions
+                .send
+                .recv_connection_window_update(frame, &mut self.store, &mut self.counts)
+                .map_err(Error::library_go_away)?;
+        } else {
+            // The remote may send window updates for streams that the local peer
+            // now considers closed. That's ok...
+            if let Some(mut stream) = self.store.find_mut(&id) {
+                // This result is ignored as there is nothing to do when there
+                // is an error. The stream is reset by the function on error and
+                // the error is informational.
+                let _ = self.actions.send.recv_stream_window_update(
+                    frame.size_increment(),
+                    send_buffer,
+                    &mut stream,
+                    &mut self.counts,
+                    &mut self.actions.task,
+                );
+            } else {
+                self.actions
+                    .ensure_not_idle(self.counts.peer(), id)
+                    .map_err(Error::library_go_away)?;
+            }
+        }
+
+        Ok(())
+    }
+
+    fn handle_error<B>(&mut self, send_buffer: &SendBuffer<B>, err: proto::Error) -> StreamId {
+        let actions = &mut self.actions;
+        let counts = &mut self.counts;
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        let last_processed_id = actions.recv.last_processed_id();
+
+        self.store.for_each(|stream| {
+            counts.transition(stream, |counts, stream| {
+                actions.recv.handle_error(&err, &mut *stream);
+                actions.send.handle_error(send_buffer, stream, counts);
+            })
+        });
+
+        actions.conn_error = Some(err);
+
+        last_processed_id
+    }
+
+    fn recv_go_away<B>(
+        &mut self,
+        send_buffer: &SendBuffer<B>,
+        frame: &frame::GoAway,
+    ) -> Result<(), Error> {
+        let actions = &mut self.actions;
+        let counts = &mut self.counts;
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        let last_stream_id = frame.last_stream_id();
+
+        actions.send.recv_go_away(last_stream_id)?;
+
+        let err = Error::remote_go_away(frame.debug_data().clone(), frame.reason());
+
+        self.store.for_each(|stream| {
+            if stream.id > last_stream_id {
+                counts.transition(stream, |counts, stream| {
+                    actions.recv.handle_error(&err, &mut *stream);
+                    actions.send.handle_error(send_buffer, stream, counts);
+                })
+            }
+        });
+
+        actions.conn_error = Some(err);
+
+        Ok(())
+    }
+
+    fn recv_push_promise<B>(
+        &mut self,
+        send_buffer: &SendBuffer<B>,
+        frame: frame::PushPromise,
+    ) -> Result<(), Error> {
+        let id = frame.stream_id();
+        let promised_id = frame.promised_id();
+
+        // First, ensure that the initiating stream is still in a valid state.
+        let parent_key = match self.store.find_mut(&id) {
+            Some(stream) => {
+                // The GOAWAY process has begun. All streams with a greater ID
+                // than specified as part of GOAWAY should be ignored.
+                if id > self.actions.recv.max_stream_id() {
+                    tracing::trace!(
+                        "id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE",
+                        id,
+                        self.actions.recv.max_stream_id()
+                    );
+                    return Ok(());
+                }
+
+                // The stream must be receive open
+                if !stream.state.ensure_recv_open()? {
+                    proto_err!(conn: "recv_push_promise: initiating stream is not opened");
+                    return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+                }
+
+                stream.key()
+            }
+            None => {
+                proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state");
+                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
+            }
+        };
+
+        // TODO: Streams in the reserved states do not count towards the concurrency
+        // limit. However, it seems like there should be a cap otherwise this
+        // could grow in memory indefinitely.
+
+        // Ensure that we can reserve streams
+        self.actions.recv.ensure_can_reserve()?;
+
+        // Next, open the stream.
+        //
+        // If `None` is returned, then the stream is being refused. There is no
+        // further work to be done.
+        if self
+            .actions
+            .recv
+            .open(promised_id, Open::PushPromise, &mut self.counts)?
+            .is_none()
+        {
+            return Ok(());
+        }
+
+        // Try to handle the frame and create a corresponding key for the pushed
+        // stream; this requires a bit of indirection to make the borrow checker happy.
+        let child_key: Option<store::Key> = {
+            // Create state for the stream
+            let stream = self.store.insert(promised_id, {
+                Stream::new(
+                    promised_id,
+                    self.actions.send.init_window_sz(),
+                    self.actions.recv.init_window_sz(),
+                )
+            });
+
+            let actions = &mut self.actions;
+
+            self.counts.transition(stream, |counts, stream| {
+                let stream_valid = actions.recv.recv_push_promise(frame, stream);
+
+                match stream_valid {
+                    Ok(()) => Ok(Some(stream.key())),
+                    _ => {
+                        let mut send_buffer = send_buffer.inner.lock().unwrap();
+                        actions
+                            .reset_on_recv_stream_err(
+                                &mut *send_buffer,
+                                stream,
+                                counts,
+                                stream_valid,
+                            )
+                            .map(|()| None)
+                    }
+                }
+            })?
+        };
+        // If we're successful, push the headers and stream...
+        if let Some(child) = child_key {
+            let mut ppp = self.store[parent_key].pending_push_promises.take();
+            ppp.push(&mut self.store.resolve(child));
+
+            let parent = &mut self.store.resolve(parent_key);
+            parent.pending_push_promises = ppp;
+            parent.notify_recv();
+        };
+
+        Ok(())
+    }
+
+    fn recv_eof<B>(
+        &mut self,
+        send_buffer: &SendBuffer<B>,
+        clear_pending_accept: bool,
+    ) -> Result<(), ()> {
+        let actions = &mut self.actions;
+        let counts = &mut self.counts;
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        if actions.conn_error.is_none() {
+            actions.conn_error = Some(
+                io::Error::new(
+                    io::ErrorKind::BrokenPipe,
+                    "connection closed because of a broken pipe",
+                )
+                .into(),
+            );
+        }
+
+        tracing::trace!("Streams::recv_eof");
+
+        self.store.for_each(|stream| {
+            counts.transition(stream, |counts, stream| {
+                actions.recv.recv_eof(stream);
+
+                // This handles resetting send state associated with the
+                // stream
+                actions.send.handle_error(send_buffer, stream, counts);
+            })
+        });
+
+        actions.clear_queues(clear_pending_accept, &mut self.store, counts);
+        Ok(())
+    }
+
+    fn poll_complete<T, B>(
+        &mut self,
+        send_buffer: &SendBuffer<B>,
+        cx: &mut Context,
+        dst: &mut Codec<T, Prioritized<B>>,
+    ) -> Poll<io::Result<()>>
+    where
+        T: AsyncWrite + Unpin,
+        B: Buf,
+    {
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        // Send WINDOW_UPDATE frames first
+        //
+        // TODO: It would probably be better to interleave updates w/ data
+        // frames.
+        ready!(self
+            .actions
+            .recv
+            .poll_complete(cx, &mut self.store, &mut self.counts, dst))?;
+
+        // Send any other pending frames
+        ready!(self.actions.send.poll_complete(
+            cx,
+            send_buffer,
+            &mut self.store,
+            &mut self.counts,
+            dst
+        ))?;
+
+        // Nothing else to do, track the task
+        self.actions.task = Some(cx.waker().clone());
+
+        Poll::Ready(Ok(()))
+    }
+
+    fn send_reset<B>(&mut self, send_buffer: &SendBuffer<B>, id: StreamId, reason: Reason) {
+        let key = match self.store.find_entry(id) {
+            Entry::Occupied(e) => e.key(),
+            Entry::Vacant(e) => {
+                // Resetting a stream we don't know about? That could be OK...
+                //
+                // 1. As a server, we just received a request, but that request
+                //    was bad, so we're resetting before even accepting it.
+                //    This is totally fine.
+                //
+                // 2. The remote may have sent us a frame on a new stream that
+                //    it was *not* supposed to send, and thus we don't know
+                //    about the stream. In that case, sending a reset will
+                //    "open" the stream in our store. Maybe that should be a
+                //    connection error instead? At least for now, we need to
+                //    update our view of what the next stream ID is.
+                if self.counts.peer().is_local_init(id) {
+                    // We normally would open this stream, so update our
+                    // next-send-id record.
+                    self.actions.send.maybe_reset_next_stream_id(id);
+                } else {
+                    // We normally would recv this stream, so update our
+                    // next-recv-id record.
+                    self.actions.recv.maybe_reset_next_stream_id(id);
+                }
+
+                let stream = Stream::new(id, 0, 0);
+
+                e.insert(stream)
+            }
+        };
+
+        let stream = self.store.resolve(key);
+        let mut send_buffer = send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+        self.actions.send_reset(
+            stream,
+            reason,
+            Initiator::Library,
+            &mut self.counts,
+            send_buffer,
+        );
+    }
+}
+
+impl<B> Streams<B, client::Peer>
+where
+    B: Buf,
+{
+    pub fn poll_pending_open(
+        &mut self,
+        cx: &Context,
+        pending: Option<&OpaqueStreamRef>,
+    ) -> Poll<Result<(), crate::Error>> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        me.actions.ensure_no_conn_error()?;
+        me.actions.send.ensure_next_stream_id()?;
+
+        if let Some(pending) = pending {
+            let mut stream = me.store.resolve(pending.key);
+            tracing::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open);
+            if stream.is_pending_open {
+                stream.wait_send(cx);
+                return Poll::Pending;
+            }
+        }
+        Poll::Ready(Ok(()))
+    }
+}
+
+impl<B, P> Streams<B, P>
+where
+    P: Peer,
+{
+    pub fn as_dyn(&self) -> DynStreams<B> {
+        let Self {
+            inner,
+            send_buffer,
+            _p,
+        } = self;
+        DynStreams {
+            inner,
+            send_buffer,
+            peer: P::r#dyn(),
+        }
+    }
+
+    /// This function is safe to call multiple times.
+    ///
+    /// A `Result` is returned to avoid panicking if the mutex is poisoned.
+    pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> {
+        self.as_dyn().recv_eof(clear_pending_accept)
+    }
+
+    pub(crate) fn max_send_streams(&self) -> usize {
+        self.inner.lock().unwrap().counts.max_send_streams()
+    }
+
+    pub(crate) fn max_recv_streams(&self) -> usize {
+        self.inner.lock().unwrap().counts.max_recv_streams()
+    }
+
+    #[cfg(feature = "unstable")]
+    pub fn num_active_streams(&self) -> usize {
+        let me = self.inner.lock().unwrap();
+        me.store.num_active_streams()
+    }
+
+    pub fn has_streams(&self) -> bool {
+        let me = self.inner.lock().unwrap();
+        me.counts.has_streams()
+    }
+
+    pub fn has_streams_or_other_references(&self) -> bool {
+        let me = self.inner.lock().unwrap();
+        me.counts.has_streams() || me.refs > 1
+    }
+
+    #[cfg(feature = "unstable")]
+    pub fn num_wired_streams(&self) -> usize {
+        let me = self.inner.lock().unwrap();
+        me.store.num_wired_streams()
+    }
+}
+
+// no derive because we don't need B and P to be Clone.
+impl<B, P> Clone for Streams<B, P>
+where
+    P: Peer,
+{
+    fn clone(&self) -> Self {
+        self.inner.lock().unwrap().refs += 1;
+        Streams {
+            inner: self.inner.clone(),
+            send_buffer: self.send_buffer.clone(),
+            _p: ::std::marker::PhantomData,
+        }
+    }
+}
+
+impl<B, P> Drop for Streams<B, P>
+where
+    P: Peer,
+{
+    fn drop(&mut self) {
+        if let Ok(mut inner) = self.inner.lock() {
+            inner.refs -= 1;
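+            // Waking the task here lets the connection notice when only its
+            // own reference to the shared state remains (compare
+            // has_streams_or_other_references above).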
+            if inner.refs == 1 {
+                if let Some(task) = inner.actions.task.take() {
+                    task.wake();
+                }
+            }
+        }
+    }
+}
+
+// ===== impl StreamRef =====
+
+impl<B> StreamRef<B> {
+    pub fn send_data(&mut self, data: B, end_stream: bool) -> Result<(), UserError>
+    where
+        B: Buf,
+    {
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let stream = me.store.resolve(self.opaque.key);
+        let actions = &mut me.actions;
+        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        me.counts.transition(stream, |counts, stream| {
+            // Create the data frame
+            let mut frame = frame::Data::new(stream.id, data);
+            frame.set_end_stream(end_stream);
+
+            // Send the data frame
+            actions
+                .send
+                .send_data(frame, send_buffer, stream, counts, &mut actions.task)
+        })
+    }
+
+    pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), UserError> {
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let stream = me.store.resolve(self.opaque.key);
+        let actions = &mut me.actions;
+        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        me.counts.transition(stream, |counts, stream| {
+            // Create the trailers frame
+            let frame = frame::Headers::trailers(stream.id, trailers);
+
+            // Send the trailers frame
+            actions
+                .send
+                .send_trailers(frame, send_buffer, stream, counts, &mut actions.task)
+        })
+    }
+
+    pub fn send_reset(&mut self, reason: Reason) {
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let stream = me.store.resolve(self.opaque.key);
+        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        me.actions
+            .send_reset(stream, reason, Initiator::User, &mut me.counts, send_buffer);
+    }
+
+    pub fn send_response(
+        &mut self,
+        mut response: Response<()>,
+        end_of_stream: bool,
+    ) -> Result<(), UserError> {
+        // Clear before taking the lock, in case extensions contain a StreamRef.
+        response.extensions_mut().clear();
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let stream = me.store.resolve(self.opaque.key);
+        let actions = &mut me.actions;
+        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        me.counts.transition(stream, |counts, stream| {
+            let frame = server::Peer::convert_send_message(stream.id, response, end_of_stream);
+
+            actions
+                .send
+                .send_headers(frame, send_buffer, stream, counts, &mut actions.task)
+        })
+    }
+
+    pub fn send_push_promise(
+        &mut self,
+        mut request: Request<()>,
+    ) -> Result<StreamRef<B>, UserError> {
+        // Clear before taking the lock, in case extensions contain a StreamRef.
+        request.extensions_mut().clear();
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
+        let send_buffer = &mut *send_buffer;
+
+        let actions = &mut me.actions;
+        let promised_id = actions.send.reserve_local()?;
+
+        let child_key = {
+            let mut child_stream = me.store.insert(
+                promised_id,
+                Stream::new(
+                    promised_id,
+                    actions.send.init_window_sz(),
+                    actions.recv.init_window_sz(),
+                ),
+            );
+            child_stream.state.reserve_local()?;
+            child_stream.is_pending_push = true;
+            child_stream.key()
+        };
+
+        let pushed = {
+            let mut stream = me.store.resolve(self.opaque.key);
+
+            let frame = crate::server::Peer::convert_push_message(stream.id, promised_id, request)?;
+
+            actions
+                .send
+                .send_push_promise(frame, send_buffer, &mut stream, &mut actions.task)
+        };
+
+        if let Err(err) = pushed {
+            let mut child_stream = me.store.resolve(child_key);
+            child_stream.unlink();
+            child_stream.remove();
+            return Err(err);
+        }
+
+        me.refs += 1;
+        let opaque =
+            OpaqueStreamRef::new(self.opaque.inner.clone(), &mut me.store.resolve(child_key));
+
+        Ok(StreamRef {
+            opaque,
+            send_buffer: self.send_buffer.clone(),
+        })
+    }
+
+    /// Called by the server after the stream is accepted. Given that clients
+    /// initiate streams by sending HEADERS, the request will always be
+    /// available.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if the request isn't present.
+    pub fn take_request(&self) -> Request<()> {
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.opaque.key);
+        me.actions.recv.take_request(&mut stream)
+    }
+
+    /// Called by a client to see if the current stream is pending open
+    pub fn is_pending_open(&self) -> bool {
+        let mut me = self.opaque.inner.lock().unwrap();
+        me.store.resolve(self.opaque.key).is_pending_open
+    }
+
+    /// Request capacity to send data
+    pub fn reserve_capacity(&mut self, capacity: WindowSize) {
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.opaque.key);
+
+        me.actions
+            .send
+            .reserve_capacity(capacity, &mut stream, &mut me.counts)
+    }
+
+    /// Returns the stream's current send capacity.
+    pub fn capacity(&self) -> WindowSize {
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.opaque.key);
+
+        me.actions.send.capacity(&mut stream)
+    }
+
+    /// Request to be notified when the stream's capacity increases
+    pub fn poll_capacity(&mut self, cx: &Context) -> Poll<Option<Result<WindowSize, UserError>>> {
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.opaque.key);
+
+        me.actions.send.poll_capacity(cx, &mut stream)
+    }
+
+    /// Request to be notified if a `RST_STREAM` is received for this stream.
+    pub(crate) fn poll_reset(
+        &mut self,
+        cx: &Context,
+        mode: proto::PollReset,
+    ) -> Poll<Result<Reason, crate::Error>> {
+        let mut me = self.opaque.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.opaque.key);
+
+        me.actions
+            .send
+            .poll_reset(cx, &mut stream, mode)
+            .map_err(From::from)
+    }
+
+    pub fn clone_to_opaque(&self) -> OpaqueStreamRef {
+        self.opaque.clone()
+    }
+
+    pub fn stream_id(&self) -> StreamId {
+        self.opaque.stream_id()
+    }
+}
+
+impl<B> Clone for StreamRef<B> {
+    fn clone(&self) -> Self {
+        StreamRef {
+            opaque: self.opaque.clone(),
+            send_buffer: self.send_buffer.clone(),
+        }
+    }
+}
+
+// ===== impl OpaqueStreamRef =====
+
+impl OpaqueStreamRef {
+    fn new(inner: Arc<Mutex<Inner>>, stream: &mut store::Ptr) -> OpaqueStreamRef {
+        stream.ref_inc();
+        OpaqueStreamRef {
+            inner,
+            key: stream.key(),
+        }
+    }
+    /// Called by a client to check for a received response.
+    pub fn poll_response(&mut self, cx: &Context) -> Poll<Result<Response<()>, proto::Error>> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.key);
+
+        me.actions.recv.poll_response(cx, &mut stream)
+    }
+    /// Called by a client to check for a pushed request.
+    pub fn poll_pushed(
+        &mut self,
+        cx: &Context,
+    ) -> Poll<Option<Result<(Request<()>, OpaqueStreamRef), proto::Error>>> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.key);
+        me.actions
+            .recv
+            .poll_pushed(cx, &mut stream)
+            .map_ok(|(h, key)| {
+                me.refs += 1;
+                let opaque_ref =
+                    OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key));
+                (h, opaque_ref)
+            })
+    }
+
+    pub fn is_end_stream(&self) -> bool {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let stream = me.store.resolve(self.key);
+
+        me.actions.recv.is_end_stream(&stream)
+    }
+
+    pub fn poll_data(&mut self, cx: &Context) -> Poll<Option<Result<Bytes, proto::Error>>> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.key);
+
+        me.actions.recv.poll_data(cx, &mut stream)
+    }
+
+    pub fn poll_trailers(&mut self, cx: &Context) -> Poll<Option<Result<HeaderMap, proto::Error>>> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.key);
+
+        me.actions.recv.poll_trailers(cx, &mut stream)
+    }
+
+    pub(crate) fn available_recv_capacity(&self) -> isize {
+        let me = self.inner.lock().unwrap();
+        let me = &*me;
+
+        let stream = &me.store[self.key];
+        stream.recv_flow.available().into()
+    }
+
+    pub(crate) fn used_recv_capacity(&self) -> WindowSize {
+        let me = self.inner.lock().unwrap();
+        let me = &*me;
+
+        let stream = &me.store[self.key];
+        stream.in_flight_recv_data
+    }
+
+    /// Releases recv capacity back to the peer. This may result in sending
+    /// WINDOW_UPDATE frames on both the stream and connection.
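+    ///
+    /// At the public API level this is typically surfaced through
+    /// `FlowControl::release_capacity`: after consuming a chunk of DATA, the
+    /// user releases its length so the peer may send more (a sketch of the
+    /// intended flow; the exact wiring lives in the public `RecvStream` types).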
+    pub fn release_capacity(&mut self, capacity: WindowSize) -> Result<(), UserError> {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.key);
+
+        me.actions
+            .recv
+            .release_capacity(capacity, &mut stream, &mut me.actions.task)
+    }
+
+    /// Clear the receive queue and set the status to no longer receive data frames.
+    pub(crate) fn clear_recv_buffer(&mut self) {
+        let mut me = self.inner.lock().unwrap();
+        let me = &mut *me;
+
+        let mut stream = me.store.resolve(self.key);
+        stream.is_recv = false;
+        me.actions.recv.clear_recv_buffer(&mut stream);
+    }
+
+    pub fn stream_id(&self) -> StreamId {
+        self.inner.lock().unwrap().store[self.key].id
+    }
+}
+
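+// Formatting uses `try_lock`, so it never blocks on (or panics over) the
+// shared state: if the mutex is poisoned or currently held elsewhere, a
+// placeholder is printed instead of the stream fields.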
+impl fmt::Debug for OpaqueStreamRef {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        use std::sync::TryLockError::*;
+
+        match self.inner.try_lock() {
+            Ok(me) => {
+                let stream = &me.store[self.key];
+                fmt.debug_struct("OpaqueStreamRef")
+                    .field("stream_id", &stream.id)
+                    .field("ref_count", &stream.ref_count)
+                    .finish()
+            }
+            Err(Poisoned(_)) => fmt
+                .debug_struct("OpaqueStreamRef")
+                .field("inner", &"<Poisoned>")
+                .finish(),
+            Err(WouldBlock) => fmt
+                .debug_struct("OpaqueStreamRef")
+                .field("inner", &"<Locked>")
+                .finish(),
+        }
+    }
+}
+
+impl Clone for OpaqueStreamRef {
+    fn clone(&self) -> Self {
+        // Increment the ref count
+        let mut inner = self.inner.lock().unwrap();
+        inner.store.resolve(self.key).ref_inc();
+        inner.refs += 1;
+
+        OpaqueStreamRef {
+            inner: self.inner.clone(),
+            key: self.key,
+        }
+    }
+}
+
+impl Drop for OpaqueStreamRef {
+    fn drop(&mut self) {
+        drop_stream_ref(&self.inner, self.key);
+    }
+}
+
+// TODO: Move back in fn above
+fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
+    let mut me = match inner.lock() {
+        Ok(inner) => inner,
+        Err(_) => {
+            if ::std::thread::panicking() {
+                tracing::trace!("StreamRef::drop; mutex poisoned");
+                return;
+            } else {
+                panic!("StreamRef::drop; mutex poisoned");
+            }
+        }
+    };
+
+    let me = &mut *me;
+    me.refs -= 1;
+    let mut stream = me.store.resolve(key);
+
+    tracing::trace!("drop_stream_ref; stream={:?}", stream);
+
+    // decrement the stream's ref count by 1.
+    stream.ref_dec();
+
+    let actions = &mut me.actions;
+
+    // If the stream is no longer referenced and is already closed (so it
+    // does not need to go through the cancellation logic below), we should
+    // notify the task (connection) so that it can close properly
+    if stream.ref_count == 0 && stream.is_closed() {
+        if let Some(task) = actions.task.take() {
+            task.wake();
+        }
+    }
+
+    me.counts.transition(stream, |counts, stream| {
+        maybe_cancel(stream, actions, counts);
+
+        if stream.ref_count == 0 {
+            // Release any recv window back to the connection; no one can
+            // access it anymore.
+            actions
+                .recv
+                .release_closed_capacity(stream, &mut actions.task);
+
+            // We won't be able to reach our push promises anymore
+            let mut ppp = stream.pending_push_promises.take();
+            while let Some(promise) = ppp.pop(stream.store_mut()) {
+                counts.transition(promise, |counts, stream| {
+                    maybe_cancel(stream, actions, counts);
+                });
+            }
+        }
+    });
+}
+
+fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) {
+    if stream.is_canceled_interest() {
+        // A server is allowed to respond early, without fully consuming the client's request stream,
+        // but per the RFC it must then send a RST_STREAM(NO_ERROR). https://www.rfc-editor.org/rfc/rfc7540#section-8.1
+        // Some other HTTP/2 implementations may treat any other error code as fatal if this is not respected (e.g. nginx: https://trac.nginx.org/nginx/ticket/2376)
+        let reason = if counts.peer().is_server()
+            && stream.state.is_send_closed()
+            && stream.state.is_recv_streaming()
+        {
+            Reason::NO_ERROR
+        } else {
+            Reason::CANCEL
+        };
+
+        actions
+            .send
+            .schedule_implicit_reset(stream, reason, counts, &mut actions.task);
+        actions.recv.enqueue_reset_expiration(stream, counts);
+    }
+}
+
+// ===== impl SendBuffer =====
+
+impl<B> SendBuffer<B> {
+    fn new() -> Self {
+        let inner = Mutex::new(Buffer::new());
+        SendBuffer { inner }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        let buf = self.inner.lock().unwrap();
+        buf.is_empty()
+    }
+}
+
+// ===== impl Actions =====
+
+impl Actions {
+    fn send_reset<B>(
+        &mut self,
+        stream: store::Ptr,
+        reason: Reason,
+        initiator: Initiator,
+        counts: &mut Counts,
+        send_buffer: &mut Buffer<Frame<B>>,
+    ) {
+        counts.transition(stream, |counts, stream| {
+            self.send.send_reset(
+                reason,
+                initiator,
+                send_buffer,
+                stream,
+                counts,
+                &mut self.task,
+            );
+            self.recv.enqueue_reset_expiration(stream, counts);
+            // if a RecvStream is parked, ensure it's notified
+            stream.notify_recv();
+        });
+    }
+
+    fn reset_on_recv_stream_err<B>(
+        &mut self,
+        buffer: &mut Buffer<Frame<B>>,
+        stream: &mut store::Ptr,
+        counts: &mut Counts,
+        res: Result<(), Error>,
+    ) -> Result<(), Error> {
+        if let Err(Error::Reset(stream_id, reason, initiator)) = res {
+            debug_assert_eq!(stream_id, stream.id);
+
+            if counts.can_inc_num_local_error_resets() {
+                counts.inc_num_local_error_resets();
+
+                // Reset the stream.
+                self.send
+                    .send_reset(reason, initiator, buffer, stream, counts, &mut self.task);
+                Ok(())
+            } else {
+                tracing::warn!(
+                    "reset_on_recv_stream_err; locally-reset streams reached limit ({:?})",
+                    counts.max_local_error_resets().unwrap(),
+                );
+                Err(Error::library_go_away_data(
+                    Reason::ENHANCE_YOUR_CALM,
+                    "too_many_internal_resets",
+                ))
+            }
+        } else {
+            res
+        }
+    }
+
+    fn ensure_not_idle(&mut self, peer: peer::Dyn, id: StreamId) -> Result<(), Reason> {
+        if peer.is_local_init(id) {
+            self.send.ensure_not_idle(id)
+        } else {
+            self.recv.ensure_not_idle(id)
+        }
+    }
+
+    fn ensure_no_conn_error(&self) -> Result<(), proto::Error> {
+        if let Some(ref err) = self.conn_error {
+            Err(err.clone())
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Check if we possibly could have processed and since forgotten this stream.
+    ///
+    /// If we send a RST_STREAM for a stream, we will eventually "forget" about
+    /// the stream to free up memory. It's possible that the remote peer had
+    /// frames in-flight, and by the time we receive them, our own state is
+    /// gone. We *could* tear everything down by sending a GOAWAY, but it
+    /// is more likely to be latency/memory constraints that caused this,
+    /// and not a bad actor. So, to be less catastrophic, the spec allows
+    /// us to send another RST_STREAM of STREAM_CLOSED.
+    fn may_have_forgotten_stream(&self, peer: peer::Dyn, id: StreamId) -> bool {
+        if id.is_zero() {
+            return false;
+        }
+        if peer.is_local_init(id) {
+            self.send.may_have_created_stream(id)
+        } else {
+            self.recv.may_have_created_stream(id)
+        }
+    }
+
+    fn clear_queues(&mut self, clear_pending_accept: bool, store: &mut Store, counts: &mut Counts) {
+        self.recv.clear_queues(clear_pending_accept, store, counts);
+        self.send.clear_queues(store, counts);
+    }
+}
diff --git a/src/server.rs b/src/server.rs
new file mode 100644
index 0000000..4f87222
--- /dev/null
+++ b/src/server.rs
@@ -0,0 +1,1638 @@
+//! Server implementation of the HTTP/2 protocol.
+//!
+//! # Getting started
+//!
+//! Running an HTTP/2 server requires the caller to manage accepting the
+//! connections as well as getting the connections to a state that is ready to
+//! begin the HTTP/2 handshake. See [here](../index.html#handshake) for more
+//! details.
+//!
+//! This could be as basic as using Tokio's [`TcpListener`] to accept
+//! connections, but usually it means using either ALPN or HTTP/1.1 protocol
+//! upgrades.
+//!
+//! Once a connection is obtained, it is passed to [`handshake`],
+//! which will begin the [HTTP/2 handshake]. This returns a future that
+//! completes once the handshake process is performed and HTTP/2 streams may
+//! be received.
+//!
+//! [`handshake`] uses default configuration values. There are a number of
+//! settings that can be changed by using [`Builder`] instead.
+//!
+//! # Inbound streams
+//!
+//! The [`Connection`] instance is used to accept inbound HTTP/2 streams. It
+//! does this by implementing [`futures::Stream`]. When a new stream is
+//! received, a call to [`Connection::accept`] will return `(request, response)`.
+//! The `request` handle (of type [`http::Request<RecvStream>`]) contains the
+//! HTTP request head as well as provides a way to receive the inbound data
+//! stream and the trailers. The `response` handle (of type [`SendResponse`])
+//! allows responding to the request, streaming the response payload, sending
+//! trailers, and sending push promises.
+//!
+//! The send ([`SendStream`]) and receive ([`RecvStream`]) halves of the stream
+//! can be operated independently.
+//!
+//! # Managing the connection
+//!
+//! The [`Connection`] instance is used to manage connection state. The caller
+//! is required to call either [`Connection::accept`] or
+//! [`Connection::poll_closed`] in order to advance the connection state. Simply
+//! operating on [`SendStream`] or [`RecvStream`] will have no effect unless the
+//! connection state is advanced.
+//!
+//! It is not required to call **both** [`Connection::accept`] and
+//! [`Connection::poll_closed`]. If the caller is ready to accept a new stream,
+//! then only [`Connection::accept`] should be called. When the caller **does
+//! not** want to accept a new stream, [`Connection::poll_closed`] should be
+//! called.
+//!
+//! The [`Connection`] instance should only be dropped once
+//! [`Connection::poll_closed`] returns `Ready`. Once [`Connection::accept`]
+//! returns `Ready(None)`, there will be no more inbound streams. At
+//! this point, only [`Connection::poll_closed`] should be called.
+//!
+//! # Shutting down the server
+//!
+//! Graceful shutdown of the server is [not yet
+//! implemented](https://github.com/hyperium/h2/issues/69).
+//!
+//! # Example
+//!
+//! A basic HTTP/2 server example that runs over TCP and assumes [prior
+//! knowledge], i.e. both the client and the server assume that the TCP socket
+//! will use the HTTP/2 protocol without prior negotiation.
+//!
+//! ```no_run
+//! use h2::server;
+//! use http::{Response, StatusCode};
+//! use tokio::net::TcpListener;
+//!
+//! #[tokio::main]
+//! pub async fn main() {
+//!     let mut listener = TcpListener::bind("127.0.0.1:5928").await.unwrap();
+//!
+//!     // Accept all incoming TCP connections.
+//!     loop {
+//!         if let Ok((socket, _peer_addr)) = listener.accept().await {
+//!             // Spawn a new task to process each connection.
+//!             tokio::spawn(async {
+//!                 // Start the HTTP/2 connection handshake
+//!                 let mut h2 = server::handshake(socket).await.unwrap();
+//!                 // Accept all inbound HTTP/2 streams sent over the
+//!                 // connection.
+//!                 while let Some(request) = h2.accept().await {
+//!                     let (request, mut respond) = request.unwrap();
+//!                     println!("Received request: {:?}", request);
+//!
+//!                     // Build a response with no body
+//!                     let response = Response::builder()
+//!                         .status(StatusCode::OK)
+//!                         .body(())
+//!                         .unwrap();
+//!
+//!                     // Send the response back to the client
+//!                     respond.send_response(response, true)
+//!                         .unwrap();
+//!                 }
+//!
+//!             });
+//!         }
+//!     }
+//! }
+//! ```
+//!
+//! [prior knowledge]: http://httpwg.org/specs/rfc7540.html#known-http
+//! [`handshake`]: fn.handshake.html
+//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
+//! [`Builder`]: struct.Builder.html
+//! [`Connection`]: struct.Connection.html
+//! [`Connection::poll`]: struct.Connection.html#method.poll
+//! [`Connection::poll_closed`]: struct.Connection.html#method.poll_closed
+//! [`futures::Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html
+//! [`http::Request<RecvStream>`]: ../struct.RecvStream.html
+//! [`RecvStream`]: ../struct.RecvStream.html
+//! [`SendStream`]: ../struct.SendStream.html
+//! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html
+
+use crate::codec::{Codec, UserError};
+use crate::frame::{self, Pseudo, PushPromiseHeaderError, Reason, Settings, StreamId};
+use crate::proto::{self, Config, Error, Prioritized};
+use crate::{FlowControl, PingPong, RecvStream, SendStream};
+
+use bytes::{Buf, Bytes};
+use http::{HeaderMap, Method, Request, Response};
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::time::Duration;
+use std::{fmt, io};
+use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
+use tracing::instrument::{Instrument, Instrumented};
+
+/// In-progress HTTP/2 connection handshake future.
+///
+/// This type implements `Future`, yielding a `Connection` instance once the
+/// handshake has completed.
+///
+/// The handshake is completed once the connection preface is fully received
+/// from the client **and** the initial settings frame is sent to the client.
+///
+/// The handshake future does not wait for the initial settings frame from the
+/// client.
+///
+/// See [module] level docs for more details.
+///
+/// [module]: index.html
+#[must_use = "futures do nothing unless polled"]
+pub struct Handshake<T, B: Buf = Bytes> {
+    /// The config to pass to Connection::new after handshake succeeds.
+    builder: Builder,
+    /// The current state of the handshake.
+    state: Handshaking<T, B>,
+    /// Span tracking the handshake
+    span: tracing::Span,
+}
+
+/// Accepts inbound HTTP/2 streams on a connection.
+///
+/// A `Connection` is backed by an I/O resource (usually a TCP socket) and
+/// implements the HTTP/2 server logic for that connection. It is responsible
+/// for receiving inbound streams initiated by the client as well as driving the
+/// internal state forward.
+///
+/// `Connection` values are created by calling [`handshake`]. Once a
+/// `Connection` value is obtained, the caller must call [`accept`] or
+/// [`poll_closed`] in order to drive the internal connection state forward.
+///
+/// See [module level] documentation for more details
+///
+/// [module level]: index.html
+/// [`handshake`]: struct.Connection.html#method.handshake
+/// [`accept`]: struct.Connection.html#method.accept
+/// [`poll_closed`]: struct.Connection.html#method.poll_closed
+///
+/// # Examples
+///
+/// ```
+/// # use tokio::io::{AsyncRead, AsyncWrite};
+/// # use h2::server;
+/// # use h2::server::*;
+/// #
+/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) {
+/// let mut server = server::handshake(my_io).await.unwrap();
+/// while let Some(request) = server.accept().await {
+///     tokio::spawn(async move {
+///         let (request, respond) = request.unwrap();
+///         // Process the request and send the response back to the client
+///         // using `respond`.
+///     });
+/// }
+/// # }
+/// #
+/// # pub fn main() {}
+/// ```
+#[must_use = "streams do nothing unless polled"]
+pub struct Connection<T, B: Buf> {
+    connection: proto::Connection<T, Peer, B>,
+}
+
+/// Builds server connections with custom configuration values.
+///
+/// Methods can be chained in order to set the configuration values.
+///
+/// The server is constructed by calling [`handshake`] and passing the I/O
+/// handle that will back the HTTP/2 server.
+///
+/// New instances of `Builder` are obtained via [`Builder::new`].
+///
+/// See function level documentation for details on the various server
+/// configuration settings.
+///
+/// [`Builder::new`]: struct.Builder.html#method.new
+/// [`handshake`]: struct.Builder.html#method.handshake
+///
+/// # Examples
+///
+/// ```
+/// # use tokio::io::{AsyncRead, AsyncWrite};
+/// # use h2::server::*;
+/// #
+/// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+/// # -> Handshake<T>
+/// # {
+/// // `server_fut` is a future representing the completion of the HTTP/2
+/// // handshake.
+/// let server_fut = Builder::new()
+///     .initial_window_size(1_000_000)
+///     .max_concurrent_streams(1000)
+///     .handshake(my_io);
+/// # server_fut
+/// # }
+/// #
+/// # pub fn main() {}
+/// ```
+#[derive(Clone, Debug)]
+pub struct Builder {
+    /// Time to keep locally reset streams around before reaping.
+    reset_stream_duration: Duration,
+
+    /// Maximum number of locally reset streams to keep at a time.
+    reset_stream_max: usize,
+
+    /// Maximum number of remotely reset streams to allow in the pending
+    /// accept queue.
+    pending_accept_reset_stream_max: usize,
+
+    /// Initial `Settings` frame to send as part of the handshake.
+    settings: Settings,
+
+    /// Initial target window size for new connections.
+    initial_target_connection_window_size: Option<u32>,
+
+    /// Maximum number of bytes to "buffer" for writing per stream.
+    max_send_buffer_size: usize,
+
+    /// Maximum number of locally reset streams due to protocol error across
+    /// the lifetime of the connection.
+    ///
+    /// When this gets exceeded, we issue GOAWAYs.
+    local_max_error_reset_streams: Option<usize>,
+}
+
+/// Send a response back to the client
+///
+/// A `SendResponse` instance is provided when receiving a request and is used
+/// to send the associated response back to the client. It is also used to
+/// explicitly reset the stream with a custom reason.
+///
+/// It will also be used to initiate push promises linked with the associated
+/// stream.
+///
+/// If the `SendResponse` instance is dropped without sending a response, then
+/// the HTTP/2 stream will be reset.
+///
+/// See [module] level docs for more details.
+///
+/// [module]: index.html
+#[derive(Debug)]
+pub struct SendResponse<B: Buf> {
+    inner: proto::StreamRef<B>,
+}
+
+/// Send a response to a promised request
+///
+/// A `SendPushedResponse` instance is provided when promising a request and is used
+/// to send the associated response to the client. It is also used to
+/// explicitly reset the stream with a custom reason.
+///
+/// It can not be used to initiate push promises.
+///
+/// If the `SendPushedResponse` instance is dropped without sending a response, then
+/// the HTTP/2 stream will be reset.
+///
+/// See [module] level docs for more details.
+///
+/// [module]: index.html
+pub struct SendPushedResponse<B: Buf> {
+    inner: SendResponse<B>,
+}
+
+// Manual implementation necessary because of rust-lang/rust#26925
+impl<B: Buf + fmt::Debug> fmt::Debug for SendPushedResponse<B> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "SendPushedResponse {{ {:?} }}", self.inner)
+    }
+}
+
+/// Stages of an in-progress handshake.
+enum Handshaking<T, B: Buf> {
+    /// State 1. Connection is flushing pending SETTINGS frame.
+    Flushing(Instrumented<Flush<T, Prioritized<B>>>),
+    /// State 2. Connection is waiting for the client preface.
+    ReadingPreface(Instrumented<ReadPreface<T, Prioritized<B>>>),
+    /// State 3. Handshake is done, polling again would panic.
+    Done,
+}
+
+/// Flush a Sink
+struct Flush<T, B> {
+    codec: Option<Codec<T, B>>,
+}
+
+/// Read the client connection preface
+struct ReadPreface<T, B> {
+    codec: Option<Codec<T, B>>,
+    pos: usize,
+}
+
+#[derive(Debug)]
+pub(crate) struct Peer;
+
+const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
+
+/// Creates a new configured HTTP/2 server with default configuration
+/// values backed by `io`.
+///
+/// It is expected that `io` already be in an appropriate state to commence
+/// the [HTTP/2 handshake]. See [Handshake] for more details.
+///
+/// Returns a future which resolves to the [`Connection`] instance once the
+/// HTTP/2 handshake has been completed. The returned [`Connection`]
+/// instance will be using default configuration values. Use [`Builder`] to
+/// customize the configuration values used by a [`Connection`] instance.
+///
+/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
+/// [Handshake]: ../index.html#handshake
+/// [`Connection`]: struct.Connection.html
+///
+/// # Examples
+///
+/// ```
+/// # use tokio::io::{AsyncRead, AsyncWrite};
+/// # use h2::server;
+/// # use h2::server::*;
+/// #
+/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+/// # {
+/// let connection = server::handshake(my_io).await.unwrap();
+/// // The HTTP/2 handshake has completed, now use `connection` to
+/// // accept inbound HTTP/2 streams.
+/// # }
+/// #
+/// # pub fn main() {}
+/// ```
+pub fn handshake<T>(io: T) -> Handshake<T, Bytes>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+{
+    Builder::new().handshake(io)
+}
+
+// ===== impl Connection =====
+
+impl<T, B> Connection<T, B>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+    B: Buf,
+{
+    fn handshake2(io: T, builder: Builder) -> Handshake<T, B> {
+        let span = tracing::trace_span!("server_handshake");
+        let entered = span.enter();
+
+        // Create the codec.
+        let mut codec = Codec::new(io);
+
+        if let Some(max) = builder.settings.max_frame_size() {
+            codec.set_max_recv_frame_size(max as usize);
+        }
+
+        if let Some(max) = builder.settings.max_header_list_size() {
+            codec.set_max_recv_header_list_size(max as usize);
+        }
+
+        // Send initial settings frame.
+        codec
+            .buffer(builder.settings.clone().into())
+            .expect("invalid SETTINGS frame");
+
+        // Create the handshake future.
+        let state =
+            Handshaking::Flushing(Flush::new(codec).instrument(tracing::trace_span!("flush")));
+
+        drop(entered);
+
+        Handshake {
+            builder,
+            state,
+            span,
+        }
+    }
+
+    /// Accept the next incoming request on this connection.
+    pub async fn accept(
+        &mut self,
+    ) -> Option<Result<(Request<RecvStream>, SendResponse<B>), crate::Error>> {
+        futures_util::future::poll_fn(move |cx| self.poll_accept(cx)).await
+    }
+
+    #[doc(hidden)]
+    pub fn poll_accept(
+        &mut self,
+        cx: &mut Context<'_>,
+    ) -> Poll<Option<Result<(Request<RecvStream>, SendResponse<B>), crate::Error>>> {
+        // Always try to advance the internal state. Getting `Pending` here is
+        // also needed so that this function can return `Pending`.
+        if self.poll_closed(cx)?.is_ready() {
+            // If the socket is closed, don't return anything
+            // TODO: drop any pending streams
+            return Poll::Ready(None);
+        }
+
+        if let Some(inner) = self.connection.next_incoming() {
+            tracing::trace!("received incoming");
+            let (head, _) = inner.take_request().into_parts();
+            let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque()));
+
+            let request = Request::from_parts(head, body);
+            let respond = SendResponse { inner };
+
+            return Poll::Ready(Some(Ok((request, respond))));
+        }
+
+        Poll::Pending
+    }
+
+    /// Sets the target window size for the whole connection.
+    ///
+    /// If `size` is greater than the current value, then a `WINDOW_UPDATE`
+    /// frame will be immediately sent to the remote, increasing the connection
+    /// level window by `size - current_value`.
+    ///
+    /// If `size` is less than the current value, nothing will happen
+    /// immediately. However, as window capacity is released by
+    /// [`FlowControl`] instances, no `WINDOW_UPDATE` frames will be sent
+    /// out until the number of "in flight" bytes drops below `size`.
+    ///
+    /// The default value is 65,535.
+    ///
+    /// See [`FlowControl`] documentation for more details.
+    ///
+    /// [`FlowControl`]: ../struct.FlowControl.html
+    /// [library level]: ../index.html#flow-control
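+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (the window size used here is an illustrative value):
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) {
+    /// let mut connection = server::handshake(my_io).await.unwrap();
+    /// // Grow the connection-level flow control window to 1 MB.
+    /// connection.set_target_window_size(1_000_000);
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```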
+    pub fn set_target_window_size(&mut self, size: u32) {
+        assert!(size <= proto::MAX_WINDOW_SIZE);
+        self.connection.set_target_window_size(size);
+    }
+
+    /// Set a new `INITIAL_WINDOW_SIZE` setting (in octets) for stream-level
+    /// flow control for received data.
+    ///
+    /// The `SETTINGS` will be sent to the remote, and only applied once the
+    /// remote acknowledges the change.
+    ///
+    /// This can be used to increase or decrease the window size for existing
+    /// streams.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if a previous call is still pending acknowledgement
+    /// from the remote endpoint.
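+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (the window size used here is an illustrative value):
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) -> Result<(), h2::Error> {
+    /// let mut connection = server::handshake(my_io).await?;
+    /// // Request a larger per-stream window; it only takes effect once the
+    /// // client acknowledges the SETTINGS change.
+    /// connection.set_initial_window_size(1_000_000)?;
+    /// # Ok(())
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```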
+    pub fn set_initial_window_size(&mut self, size: u32) -> Result<(), crate::Error> {
+        assert!(size <= proto::MAX_WINDOW_SIZE);
+        self.connection.set_initial_window_size(size)?;
+        Ok(())
+    }
+
+    /// Enables the [extended CONNECT protocol].
+    ///
+    /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if a previous call is still pending acknowledgement
+    /// from the remote endpoint.
+    pub fn enable_connect_protocol(&mut self) -> Result<(), crate::Error> {
+        self.connection.set_enable_connect_protocol()?;
+        Ok(())
+    }
+
+    /// Returns `Ready` when the underlying connection has closed.
+    ///
+    /// If any new inbound streams are received during a call to `poll_closed`,
+    /// they will be queued and returned on the next call to [`poll_accept`].
+    ///
+    /// This function will advance the internal connection state, driving
+    /// progress on all the other handles (e.g. [`RecvStream`] and [`SendStream`]).
+    ///
+    /// See [here](index.html#managing-the-connection) for more details.
+    ///
+    /// [`poll_accept`]: struct.Connection.html#method.poll_accept
+    /// [`RecvStream`]: ../struct.RecvStream.html
+    /// [`SendStream`]: ../struct.SendStream.html
+    pub fn poll_closed(&mut self, cx: &mut Context) -> Poll<Result<(), crate::Error>> {
+        self.connection.poll(cx).map_err(Into::into)
+    }
+
+    /// Sets the connection to a GOAWAY state.
+    ///
+    /// Does not terminate the connection. The connection must continue to be
+    /// polled in order to close.
+    ///
+    /// After flushing the GOAWAY frame, the connection is closed. Any
+    /// outstanding streams do not prevent the connection from closing. This
+    /// should usually be reserved for shutting down when something bad
+    /// external to `h2` has happened, and open streams cannot be properly
+    /// handled.
+    ///
+    /// For graceful shutdowns, see [`graceful_shutdown`](Connection::graceful_shutdown).
+    pub fn abrupt_shutdown(&mut self, reason: Reason) {
+        self.connection.go_away_from_user(reason);
+    }
+
+    /// Starts a [graceful shutdown][1] process.
+    ///
+    /// The connection must continue to be polled in order to close.
+    ///
+    /// It's possible to receive more requests after calling this method, since
+    /// they might have been in-flight from the client already. After about
+    /// 1 RTT, no new requests should be accepted. Once all active streams
+    /// have completed, the connection is closed.
+    ///
+    /// [1]: http://httpwg.org/specs/rfc7540.html#GOAWAY
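+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch: request the shutdown, then keep driving the
+    /// connection so the GOAWAY is flushed and in-flight streams can finish.
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) {
+    /// let mut connection = server::handshake(my_io).await.unwrap();
+    /// connection.graceful_shutdown();
+    /// // Keep accepting until the peer has no more in-flight streams.
+    /// while let Some(request) = connection.accept().await {
+    ///     // Handle any remaining in-flight requests here.
+    ///     let (_request, _respond) = request.unwrap();
+    /// }
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```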
+    pub fn graceful_shutdown(&mut self) {
+        self.connection.go_away_gracefully();
+    }
+
+    /// Takes a `PingPong` instance from the connection.
+    ///
+    /// # Note
+    ///
+    /// This may only be called once. Calling multiple times will return `None`.
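+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch showing that the handle can only be taken once:
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server;
+    /// #
+    /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) {
+    /// let mut connection = server::handshake(my_io).await.unwrap();
+    /// if let Some(_ping_pong) = connection.ping_pong() {
+    ///     // Use the handle to send PING frames; further calls return `None`.
+    ///     assert!(connection.ping_pong().is_none());
+    /// }
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```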
+    pub fn ping_pong(&mut self) -> Option<PingPong> {
+        self.connection.take_user_pings().map(PingPong::new)
+    }
+
+    /// Returns the maximum number of concurrent streams that may be initiated
+    /// by the server on this connection.
+    ///
+    /// This limit is configured by the client peer by sending the
+    /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame.
+    /// This method returns the currently acknowledged value received from the
+    /// remote.
+    ///
+    /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2
+    pub fn max_concurrent_send_streams(&self) -> usize {
+        self.connection.max_send_streams()
+    }
+
+    /// Returns the maximum number of concurrent streams that may be initiated
+    /// by the client on this connection.
+    ///
+    /// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS`
+    /// parameter][1] sent in a `SETTINGS` frame that has been
+    /// acknowledged by the remote peer. The value to be sent is configured by
+    /// the [`Builder::max_concurrent_streams`][2] method before handshaking
+    /// with the remote peer.
+    ///
+    /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2
+    /// [2]: ../struct.Builder.html#method.max_concurrent_streams
+    pub fn max_concurrent_recv_streams(&self) -> usize {
+        self.connection.max_recv_streams()
+    }
+
+    // Could disappear at any time.
+    #[doc(hidden)]
+    #[cfg(feature = "unstable")]
+    pub fn num_wired_streams(&self) -> usize {
+        self.connection.num_wired_streams()
+    }
+}
+
+#[cfg(feature = "stream")]
+impl<T, B> futures_core::Stream for Connection<T, B>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+    B: Buf,
+{
+    type Item = Result<(Request<RecvStream>, SendResponse<B>), crate::Error>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.poll_accept(cx)
+    }
+}
+
+impl<T, B> fmt::Debug for Connection<T, B>
+where
+    T: fmt::Debug,
+    B: fmt::Debug + Buf,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Connection")
+            .field("connection", &self.connection)
+            .finish()
+    }
+}
+
+// ===== impl Builder =====
+
+impl Builder {
+    /// Returns a new server builder instance initialized with default
+    /// configuration values.
+    ///
+    /// Configuration methods can be chained on the return value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .initial_window_size(1_000_000)
+    ///     .max_concurrent_streams(1000)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn new() -> Builder {
+        Builder {
+            reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS),
+            reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX,
+            pending_accept_reset_stream_max: proto::DEFAULT_REMOTE_RESET_STREAM_MAX,
+            settings: Settings::default(),
+            initial_target_connection_window_size: None,
+            max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE,
+
+            local_max_error_reset_streams: Some(proto::DEFAULT_LOCAL_RESET_COUNT_MAX),
+        }
+    }
+
+    /// Indicates the initial window size (in octets) for stream-level
+    /// flow control for received data.
+    ///
+    /// The initial window of a stream is used as part of flow control. For more
+    /// details, see [`FlowControl`].
+    ///
+    /// The default value is 65,535.
+    ///
+    /// [`FlowControl`]: ../struct.FlowControl.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .initial_window_size(1_000_000)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn initial_window_size(&mut self, size: u32) -> &mut Self {
+        self.settings.set_initial_window_size(Some(size));
+        self
+    }
+
+    /// Indicates the initial window size (in octets) for connection-level flow control
+    /// for received data.
+    ///
+    /// The initial window of a connection is used as part of flow control. For more details,
+    /// see [`FlowControl`].
+    ///
+    /// The default value is 65,535.
+    ///
+    /// [`FlowControl`]: ../struct.FlowControl.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .initial_connection_window_size(1_000_000)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn initial_connection_window_size(&mut self, size: u32) -> &mut Self {
+        self.initial_target_connection_window_size = Some(size);
+        self
+    }
+
+    /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the
+    /// configured server is able to accept.
+    ///
+    /// The sender may send data frames that are **smaller** than this value,
+    /// but any data larger than `max` will be broken up into multiple `DATA`
+    /// frames.
+    ///
+    /// The value **must** be between 16,384 and 16,777,215. The default value is 16,384.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .max_frame_size(1_000_000)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if `max` is not within the legal range specified
+    /// above.
+    pub fn max_frame_size(&mut self, max: u32) -> &mut Self {
+        self.settings.set_max_frame_size(Some(max));
+        self
+    }
+
+    /// Sets the max size of received header frames.
+    ///
+    /// This advisory setting informs a peer of the maximum size of header list
+    /// that the sender is prepared to accept, in octets. The value is based on
+    /// the uncompressed size of header fields, including the length of the name
+    /// and value in octets plus an overhead of 32 octets for each header field.
+    ///
+    /// This setting is also used to limit the maximum amount of data that is
+    /// buffered to decode HEADERS frames.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .max_header_list_size(16 * 1024)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
+        self.settings.set_max_header_list_size(Some(max));
+        self
+    }
+
+    /// Sets the maximum number of concurrent streams.
+    ///
+    /// The maximum concurrent streams setting only controls the maximum number
+    /// of streams that can be initiated by the remote peer. In other words,
+    /// when this setting is set to 100, this does not limit the number of
+    /// concurrent streams that can be created by the caller.
+    ///
+    /// It is recommended that this value be no smaller than 100, so as to not
+    /// unnecessarily limit parallelism. However, any value is legal, including
+    /// 0. If `max` is set to 0, then the remote will not be permitted to
+    /// initiate streams.
+    ///
+    /// Note that streams in the reserved state, i.e., push promises that have
+    /// been reserved but the stream has not started, do not count against this
+    /// setting.
+    ///
+    /// Also note that if the remote *does* exceed the value set here, it is not
+    /// a protocol level error. Instead, the `h2` library will immediately reset
+    /// the stream.
+    ///
+    /// See [Section 5.1.2] in the HTTP/2 spec for more details.
+    ///
+    /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .max_concurrent_streams(1000)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn max_concurrent_streams(&mut self, max: u32) -> &mut Self {
+        self.settings.set_max_concurrent_streams(Some(max));
+        self
+    }
+
+    /// Sets the maximum number of concurrent locally reset streams.
+    ///
+    /// When a stream is explicitly reset by either calling
+    /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance
+    /// before completing the stream, the HTTP/2 specification requires that
+    /// any further frames received for that stream must be ignored for "some
+    /// time".
+    ///
+    /// In order to satisfy the specification, internal state must be maintained
+    /// to implement the behavior. This state grows linearly with the number of
+    /// streams that are locally reset.
+    ///
+    /// The `max_concurrent_reset_streams` setting sets an upper
+    /// bound on the amount of state that is maintained. When this max value is
+    /// reached, the oldest reset stream is purged from memory.
+    ///
+    /// Once the stream has been fully purged from memory, any additional frames
+    /// received for that stream will result in a connection level protocol
+    /// error, forcing the connection to terminate.
+    ///
+    /// The default value is 10.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .max_concurrent_reset_streams(1000)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
+        self.reset_stream_max = max;
+        self
+    }
+
+    /// Sets the maximum number of local resets due to protocol errors made by the remote end.
+    ///
+    /// Invalid frames and many other protocol errors will lead to resets being generated for those streams.
+    /// Too many of these often indicate a malicious client, and there are attacks which can abuse this to DoS servers.
+    /// This limit protects against such DoS attacks by limiting the number of resets we can be forced to generate.
+    ///
+    /// When the number of local resets exceeds this threshold, the server will issue GOAWAYs with an error code of
+    /// `ENHANCE_YOUR_CALM` to the client.
+    ///
+    /// If you really want to disable this, supply [`Option::None`] here.
+    /// Disabling this is not recommended and may expose you to DoS attacks.
+    ///
+    /// The default value is currently 1024, but could change.
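+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch setting the limit explicitly (the value shown is
+    /// illustrative):
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .max_local_error_reset_streams(Some(1024))
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```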
+    pub fn max_local_error_reset_streams(&mut self, max: Option<usize>) -> &mut Self {
+        self.local_max_error_reset_streams = max;
+        self
+    }
+
+    /// Sets the maximum number of pending-accept remotely-reset streams.
+    ///
+    /// Streams that have been received from the peer, but not accepted by the
+    /// user, can also receive a RST_STREAM. This is a legitimate pattern: one
+    /// could send a request and then shortly after, realize it is not needed,
+    /// sending a CANCEL.
+    ///
+    /// However, since those streams are now "closed", they don't count towards
+    /// the max concurrent streams. So, they will sit in the accept queue,
+    /// using memory.
+    ///
+    /// When the number of remotely-reset streams sitting in the pending-accept
+    /// queue reaches this maximum value, a connection error with the code of
+    /// `ENHANCE_YOUR_CALM` will be sent to the peer, and returned by the
+    /// `Future`.
+    ///
+    /// The default value is currently 20, but could change.
+    ///
+    /// # Examples
+    ///
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .max_pending_accept_reset_streams(100)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn max_pending_accept_reset_streams(&mut self, max: usize) -> &mut Self {
+        self.pending_accept_reset_stream_max = max;
+        self
+    }
+
+    /// Sets the maximum send buffer size per stream.
+    ///
+    /// Once a stream has buffered up to (or over) the maximum, the stream's
+    /// flow control will not "poll" additional capacity. Once bytes for the
+    /// stream have been written to the connection, the send buffer capacity
+    /// will be freed up again.
+    ///
+    /// The default is currently ~400KB, but may change.
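+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch capping the per-stream send buffer (the value shown
+    /// is illustrative):
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .max_send_buffer_size(64 * 1024)
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```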
+    ///
+    /// # Panics
+    ///
+    /// This function panics if `max` is larger than `u32::MAX`.
+    pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self {
+        assert!(max <= std::u32::MAX as usize);
+        self.max_send_buffer_size = max;
+        self
+    }
+
+    /// Sets the maximum number of concurrent locally reset streams.
+    ///
+    /// When a stream is explicitly reset by either calling
+    /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance
+    /// before completing the stream, the HTTP/2 specification requires that
+    /// any further frames received for that stream must be ignored for "some
+    /// time".
+    ///
+    /// In order to satisfy the specification, internal state must be maintained
+    /// to implement the behavior. This state grows linearly with the number of
+    /// streams that are locally reset.
+    ///
+    /// The `reset_stream_duration` setting configures the max amount of time
+    /// this state will be maintained in memory. Once the duration elapses, the
+    /// stream state is purged from memory.
+    ///
+    /// Once the stream has been fully purged from memory, any additional frames
+    /// received for that stream will result in a connection level protocol
+    /// error, forcing the connection to terminate.
+    ///
+    /// The default value is 30 seconds.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// # use std::time::Duration;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .reset_stream_duration(Duration::from_secs(10))
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn reset_stream_duration(&mut self, dur: Duration) -> &mut Self {
+        self.reset_stream_duration = dur;
+        self
+    }
+
+    /// Enables the [extended CONNECT protocol].
+    ///
+    /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
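+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch advertising extended CONNECT support during the
+    /// handshake:
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .enable_connect_protocol()
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```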
+    pub fn enable_connect_protocol(&mut self) -> &mut Self {
+        self.settings.set_enable_connect_protocol(Some(1));
+        self
+    }
+
+    /// Creates a new configured HTTP/2 server backed by `io`.
+    ///
+    /// It is expected that `io` already be in an appropriate state to commence
+    /// the [HTTP/2 handshake]. See [Handshake] for more details.
+    ///
+    /// Returns a future which resolves to the [`Connection`] instance once the
+    /// HTTP/2 handshake has been completed.
+    ///
+    /// This function also allows the caller to configure the send payload data
+    /// type. See [Outbound data type] for more details.
+    ///
+    /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
+    /// [Handshake]: ../index.html#handshake
+    /// [`Connection`]: struct.Connection.html
+    /// [Outbound data type]: ../index.html#outbound-data-type.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut = Builder::new()
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    ///
+    /// Configures the send-payload data type. In this case, the outbound data
+    /// type will be `&'static [u8]`.
+    ///
+    /// ```
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # use h2::server::*;
+    /// #
+    /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
+    /// # -> Handshake<T, &'static [u8]>
+    /// # {
+    /// // `server_fut` is a future representing the completion of the HTTP/2
+    /// // handshake.
+    /// let server_fut: Handshake<_, &'static [u8]> = Builder::new()
+    ///     .handshake(my_io);
+    /// # server_fut
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```
+    pub fn handshake<T, B>(&self, io: T) -> Handshake<T, B>
+    where
+        T: AsyncRead + AsyncWrite + Unpin,
+        B: Buf,
+    {
+        Connection::handshake2(io, self.clone())
+    }
+}
+
+impl Default for Builder {
+    fn default() -> Builder {
+        Builder::new()
+    }
+}
+
+// ===== impl SendResponse =====
+
+impl<B: Buf> SendResponse<B> {
+    /// Send a response to a client request.
+    ///
+    /// On success, a [`SendStream`] instance is returned. This instance can be
+    /// used to stream the response body and send trailers.
+    ///
+    /// If a body or trailers will be sent on the returned [`SendStream`]
+    /// instance, then `end_of_stream` must be set to `false` when calling this
+    /// function.
+    ///
+    /// The [`SendResponse`] instance is already associated with a received
+    /// request.  This function may only be called once per instance and only if
+    /// [`send_reset`] has not been previously called.
+    ///
+    /// [`SendResponse`]: #
+    /// [`SendStream`]: ../struct.SendStream.html
+    /// [`send_reset`]: #method.send_reset
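+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the `respond` handle stands in for one obtained from
+    /// `Connection::accept`, and the body bytes are illustrative:
+    ///
+    /// ```
+    /// # use bytes::Bytes;
+    /// # use h2::server::SendResponse;
+    /// # use http::{Response, StatusCode};
+    /// #
+    /// # fn doc(mut respond: SendResponse<Bytes>) -> Result<(), h2::Error> {
+    /// let response = Response::builder()
+    ///     .status(StatusCode::OK)
+    ///     .body(())
+    ///     .unwrap();
+    ///
+    /// // `end_of_stream` is `false` because a body will follow.
+    /// let mut send = respond.send_response(response, false)?;
+    /// send.send_data(Bytes::from_static(b"hello"), true)?;
+    /// # Ok(())
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```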
+    pub fn send_response(
+        &mut self,
+        response: Response<()>,
+        end_of_stream: bool,
+    ) -> Result<SendStream<B>, crate::Error> {
+        self.inner
+            .send_response(response, end_of_stream)
+            .map(|_| SendStream::new(self.inner.clone()))
+            .map_err(Into::into)
+    }
+
+    /// Push a request and response to the client
+    ///
+    /// On success, a [`SendResponse`] instance is returned.
+    ///
+    /// [`SendResponse`]: #
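+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch: promise a GET for a related resource, then respond on
+    /// the pushed stream. The `respond` handle stands in for one obtained from
+    /// `Connection::accept`, and the URI is illustrative:
+    ///
+    /// ```
+    /// # use bytes::Bytes;
+    /// # use h2::server::SendResponse;
+    /// # use http::{Request, Response};
+    /// #
+    /// # fn doc(mut respond: SendResponse<Bytes>) -> Result<(), h2::Error> {
+    /// let promised = Request::builder()
+    ///     .method("GET")
+    ///     .uri("https://example.com/style.css")
+    ///     .body(())
+    ///     .unwrap();
+    ///
+    /// let mut pushed = respond.push_request(promised)?;
+    /// // Complete the promised stream with a response and no body.
+    /// pushed.send_response(Response::new(()), true)?;
+    /// # Ok(())
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```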
+    pub fn push_request(
+        &mut self,
+        request: Request<()>,
+    ) -> Result<SendPushedResponse<B>, crate::Error> {
+        self.inner
+            .send_push_promise(request)
+            .map(|inner| SendPushedResponse {
+                inner: SendResponse { inner },
+            })
+            .map_err(Into::into)
+    }
+
+    /// Send a stream reset to the peer.
+    ///
+    /// This essentially cancels the stream, including any inbound or outbound
+    /// data streams.
+    ///
+    /// If this function is called before [`send_response`], a call to
+    /// [`send_response`] will result in an error.
+    ///
+    /// If this function is called while a [`SendStream`] instance is active,
+    /// any further use of the instance will result in an error.
+    ///
+    /// This function should only be called once.
+    ///
+    /// [`send_response`]: #method.send_response
+    /// [`SendStream`]: ../struct.SendStream.html
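+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch: cancel the stream instead of responding. The
+    /// `respond` handle stands in for one obtained from `Connection::accept`.
+    ///
+    /// ```
+    /// # use bytes::Bytes;
+    /// # use h2::server::SendResponse;
+    /// #
+    /// # fn doc(mut respond: SendResponse<Bytes>) {
+    /// respond.send_reset(h2::Reason::CANCEL);
+    /// # }
+    /// #
+    /// # pub fn main() {}
+    /// ```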
+    pub fn send_reset(&mut self, reason: Reason) {
+        self.inner.send_reset(reason)
+    }
+
+    /// Polls to be notified when the client resets this stream.
+    ///
+    /// If the stream is still open, this returns `Poll::Pending`, and
+    /// registers the task to be notified if a `RST_STREAM` is received.
+    ///
+    /// If a `RST_STREAM` frame is received for this stream, calling this
+    /// method will yield the `Reason` for the reset.
+    ///
+    /// # Error
+    ///
+    /// Calling this method after having called `send_response` will return
+    /// a user error.
+    pub fn poll_reset(&mut self, cx: &mut Context) -> Poll<Result<Reason, crate::Error>> {
+        self.inner.poll_reset(cx, proto::PollReset::AwaitingHeaders)
+    }
+
+    /// Returns the stream ID of the response stream.
+    ///
+    /// # Panics
+    ///
+    /// If the lock on the stream store has been poisoned.
+    pub fn stream_id(&self) -> crate::StreamId {
+        crate::StreamId::from_internal(self.inner.stream_id())
+    }
+}
+
+// ===== impl SendPushedResponse =====
+
+impl<B: Buf> SendPushedResponse<B> {
+    /// Send a response to a promised request.
+    ///
+    /// On success, a [`SendStream`] instance is returned. This instance can be
+    /// used to stream the response body and send trailers.
+    ///
+    /// If a body or trailers will be sent on the returned [`SendStream`]
+    /// instance, then `end_of_stream` must be set to `false` when calling this
+    /// function.
+    ///
+    /// The [`SendPushedResponse`] instance is associated with a promised
+    /// request.  This function may only be called once per instance and only if
+    /// [`send_reset`] has not been previously called.
+    ///
+    /// [`SendPushedResponse`]: #
+    /// [`SendStream`]: ../struct.SendStream.html
+    /// [`send_reset`]: #method.send_reset
+    pub fn send_response(
+        &mut self,
+        response: Response<()>,
+        end_of_stream: bool,
+    ) -> Result<SendStream<B>, crate::Error> {
+        self.inner.send_response(response, end_of_stream)
+    }
+
+    /// Send a stream reset to the peer.
+    ///
+    /// This essentially cancels the stream, including any inbound or outbound
+    /// data streams.
+    ///
+    /// If this function is called before [`send_response`], a call to
+    /// [`send_response`] will result in an error.
+    ///
+    /// If this function is called while a [`SendStream`] instance is active,
+    /// any further use of the instance will result in an error.
+    ///
+    /// This function should only be called once.
+    ///
+    /// [`send_response`]: #method.send_response
+    /// [`SendStream`]: ../struct.SendStream.html
+    pub fn send_reset(&mut self, reason: Reason) {
+        self.inner.send_reset(reason)
+    }
+
+    /// Polls to be notified when the client resets this stream.
+    ///
+    /// If the stream is still open, this returns `Poll::Pending`, and
+    /// registers the task to be notified if a `RST_STREAM` is received.
+    ///
+    /// If a `RST_STREAM` frame is received for this stream, calling this
+    /// method will yield the `Reason` for the reset.
+    ///
+    /// # Error
+    ///
+    /// Calling this method after having called `send_response` will return
+    /// a user error.
+    pub fn poll_reset(&mut self, cx: &mut Context) -> Poll<Result<Reason, crate::Error>> {
+        self.inner.poll_reset(cx)
+    }
+
+    /// Returns the stream ID of the response stream.
+    ///
+    /// # Panics
+    ///
+    /// If the lock on the stream store has been poisoned.
+    pub fn stream_id(&self) -> crate::StreamId {
+        self.inner.stream_id()
+    }
+}
+
+// ===== impl Flush =====
+
+impl<T, B: Buf> Flush<T, B> {
+    fn new(codec: Codec<T, B>) -> Self {
+        Flush { codec: Some(codec) }
+    }
+}
+
+impl<T, B> Future for Flush<T, B>
+where
+    T: AsyncWrite + Unpin,
+    B: Buf,
+{
+    type Output = Result<Codec<T, B>, crate::Error>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // Flush the codec
+        ready!(self.codec.as_mut().unwrap().flush(cx)).map_err(crate::Error::from_io)?;
+
+        // Return the codec
+        Poll::Ready(Ok(self.codec.take().unwrap()))
+    }
+}
+
+impl<T, B: Buf> ReadPreface<T, B> {
+    fn new(codec: Codec<T, B>) -> Self {
+        ReadPreface {
+            codec: Some(codec),
+            pos: 0,
+        }
+    }
+
+    fn inner_mut(&mut self) -> &mut T {
+        self.codec.as_mut().unwrap().get_mut()
+    }
+}
+
+impl<T, B> Future for ReadPreface<T, B>
+where
+    T: AsyncRead + Unpin,
+    B: Buf,
+{
+    type Output = Result<Codec<T, B>, crate::Error>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut buf = [0; 24];
+        let mut rem = PREFACE.len() - self.pos;
+
+        while rem > 0 {
+            let mut buf = ReadBuf::new(&mut buf[..rem]);
+            ready!(Pin::new(self.inner_mut()).poll_read(cx, &mut buf))
+                .map_err(crate::Error::from_io)?;
+            let n = buf.filled().len();
+            if n == 0 {
+                return Poll::Ready(Err(crate::Error::from_io(io::Error::new(
+                    io::ErrorKind::UnexpectedEof,
+                    "connection closed before reading preface",
+                ))));
+            }
+
+            if &PREFACE[self.pos..self.pos + n] != buf.filled() {
+                proto_err!(conn: "read_preface: invalid preface");
+                // TODO: Should this just write the GO_AWAY frame directly?
+                return Poll::Ready(Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()));
+            }
+
+            self.pos += n;
+            rem -= n; // TODO test
+        }
+
+        Poll::Ready(Ok(self.codec.take().unwrap()))
+    }
+}
+
+// ===== impl Handshake =====
+
+impl<T, B: Buf> Future for Handshake<T, B>
+where
+    T: AsyncRead + AsyncWrite + Unpin,
+    B: Buf,
+{
+    type Output = Result<Connection<T, B>, crate::Error>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let span = self.span.clone(); // XXX(eliza): T_T
+        let _e = span.enter();
+        tracing::trace!(state = ?self.state);
+
+        loop {
+            match &mut self.state {
+                Handshaking::Flushing(flush) => {
+                    // We're currently flushing a pending SETTINGS frame. Poll the
+                    // flush future, and, if it's completed, advance our state to wait
+                    // for the client preface.
+                    let codec = match Pin::new(flush).poll(cx)? {
+                        Poll::Pending => {
+                            tracing::trace!(flush.poll = %"Pending");
+                            return Poll::Pending;
+                        }
+                        Poll::Ready(flushed) => {
+                            tracing::trace!(flush.poll = %"Ready");
+                            flushed
+                        }
+                    };
+                    self.state = Handshaking::ReadingPreface(
+                        ReadPreface::new(codec).instrument(tracing::trace_span!("read_preface")),
+                    );
+                }
+                Handshaking::ReadingPreface(read) => {
+                    let codec = ready!(Pin::new(read).poll(cx)?);
+
+                    self.state = Handshaking::Done;
+
+                    let connection = proto::Connection::new(
+                        codec,
+                        Config {
+                            next_stream_id: 2.into(),
+                            // Server does not need to locally initiate any streams
+                            initial_max_send_streams: 0,
+                            max_send_buffer_size: self.builder.max_send_buffer_size,
+                            reset_stream_duration: self.builder.reset_stream_duration,
+                            reset_stream_max: self.builder.reset_stream_max,
+                            remote_reset_stream_max: self.builder.pending_accept_reset_stream_max,
+                            local_error_reset_streams_max: self
+                                .builder
+                                .local_max_error_reset_streams,
+                            settings: self.builder.settings.clone(),
+                        },
+                    );
+
+                    tracing::trace!("connection established!");
+                    let mut c = Connection { connection };
+                    if let Some(sz) = self.builder.initial_target_connection_window_size {
+                        c.set_target_window_size(sz);
+                    }
+
+                    return Poll::Ready(Ok(c));
+                }
+                Handshaking::Done => {
+                    panic!("Handshaking::poll() called again after handshaking was complete")
+                }
+            }
+        }
+    }
+}
+
+impl<T, B> fmt::Debug for Handshake<T, B>
+where
+    T: AsyncRead + AsyncWrite + fmt::Debug,
+    B: fmt::Debug + Buf,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "server::Handshake")
+    }
+}
+
+impl Peer {
+    pub fn convert_send_message(
+        id: StreamId,
+        response: Response<()>,
+        end_of_stream: bool,
+    ) -> frame::Headers {
+        use http::response::Parts;
+
+        // Extract the components of the HTTP response
+        let (
+            Parts {
+                status, headers, ..
+            },
+            _,
+        ) = response.into_parts();
+
+        // Build the pseudo header set. All responses include `:status`.
+        let pseudo = Pseudo::response(status);
+
+        // Create the HEADERS frame
+        let mut frame = frame::Headers::new(id, pseudo, headers);
+
+        if end_of_stream {
+            frame.set_end_stream()
+        }
+
+        frame
+    }
+
+    pub fn convert_push_message(
+        stream_id: StreamId,
+        promised_id: StreamId,
+        request: Request<()>,
+    ) -> Result<frame::PushPromise, UserError> {
+        use http::request::Parts;
+
+        if let Err(e) = frame::PushPromise::validate_request(&request) {
+            use PushPromiseHeaderError::*;
+            match e {
+                NotSafeAndCacheable => tracing::debug!(
+                    ?promised_id,
+                    "convert_push_message: method {} is not safe and cacheable",
+                    request.method(),
+                ),
+                InvalidContentLength(e) => tracing::debug!(
+                    ?promised_id,
+                    "convert_push_message; promised request has invalid content-length {:?}",
+                    e,
+                ),
+            }
+            return Err(UserError::MalformedHeaders);
+        }
+
+        // Extract the components of the HTTP request
+        let (
+            Parts {
+                method,
+                uri,
+                headers,
+                ..
+            },
+            _,
+        ) = request.into_parts();
+
+        let pseudo = Pseudo::request(method, uri, None);
+
+        Ok(frame::PushPromise::new(
+            stream_id,
+            promised_id,
+            pseudo,
+            headers,
+        ))
+    }
+}
+
+impl proto::Peer for Peer {
+    type Poll = Request<()>;
+
+    const NAME: &'static str = "Server";
+
+    /*
+    fn is_server() -> bool {
+        true
+    }
+    */
+
+    fn r#dyn() -> proto::DynPeer {
+        proto::DynPeer::Server
+    }
+
+    fn convert_poll_message(
+        pseudo: Pseudo,
+        fields: HeaderMap,
+        stream_id: StreamId,
+    ) -> Result<Self::Poll, Error> {
+        use http::{uri, Version};
+
+        let mut b = Request::builder();
+
+        macro_rules! malformed {
+            ($($arg:tt)*) => {{
+                tracing::debug!($($arg)*);
+                return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR));
+            }}
+        }
+
+        b = b.version(Version::HTTP_2);
+
+        let is_connect;
+        if let Some(method) = pseudo.method {
+            is_connect = method == Method::CONNECT;
+            b = b.method(method);
+        } else {
+            malformed!("malformed headers: missing method");
+        }
+
+        let has_protocol = pseudo.protocol.is_some();
+        if has_protocol {
+            if is_connect {
+                // Assert that we have the right type.
+                b = b.extension::<crate::ext::Protocol>(pseudo.protocol.unwrap());
+            } else {
+                malformed!("malformed headers: :protocol on non-CONNECT request");
+            }
+        }
+
+        if pseudo.status.is_some() {
+            malformed!("malformed headers: :status field on request");
+        }
+
+        // Convert the URI
+        let mut parts = uri::Parts::default();
+
+        // A request translated from HTTP/1 must not include the :authority
+        // header
+        if let Some(authority) = pseudo.authority {
+            let maybe_authority = uri::Authority::from_maybe_shared(authority.clone().into_inner());
+            parts.authority = Some(maybe_authority.or_else(|why| {
+                malformed!(
+                    "malformed headers: malformed authority ({:?}): {}",
+                    authority,
+                    why,
+                )
+            })?);
+        }
+
+        // A :scheme is required, except CONNECT.
+        if let Some(scheme) = pseudo.scheme {
+            if is_connect && !has_protocol {
+                malformed!("malformed headers: :scheme in CONNECT");
+            }
+            let maybe_scheme = scheme.parse();
+            let scheme = maybe_scheme.or_else(|why| {
+                malformed!(
+                    "malformed headers: malformed scheme ({:?}): {}",
+                    scheme,
+                    why,
+                )
+            })?;
+
+            // It's not possible to build a `Uri` from a scheme and path. So,
+            // after validating it was a valid scheme, we just have to drop it
+            // if there isn't an :authority.
+            if parts.authority.is_some() {
+                parts.scheme = Some(scheme);
+            }
+        } else if !is_connect || has_protocol {
+            malformed!("malformed headers: missing scheme");
+        }
+
+        if let Some(path) = pseudo.path {
+            if is_connect && !has_protocol {
+                malformed!("malformed headers: :path in CONNECT");
+            }
+
+            // This cannot be empty
+            if path.is_empty() {
+                malformed!("malformed headers: missing path");
+            }
+
+            let maybe_path = uri::PathAndQuery::from_maybe_shared(path.clone().into_inner());
+            parts.path_and_query = Some(maybe_path.or_else(|why| {
+                malformed!("malformed headers: malformed path ({:?}): {}", path, why,)
+            })?);
+        } else if is_connect && has_protocol {
+            malformed!("malformed headers: missing path in extended CONNECT");
+        }
+
+        b = b.uri(parts);
+
+        let mut request = match b.body(()) {
+            Ok(request) => request,
+            Err(e) => {
+                // TODO: Should there be more specialized handling for different
+                // kinds of errors?
+                proto_err!(stream: "error building request: {}; stream={:?}", e, stream_id);
+                return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR));
+            }
+        };
+
+        *request.headers_mut() = fields;
+
+        Ok(request)
+    }
+}
+
+// ===== impl Handshaking =====
+
+impl<T, B> fmt::Debug for Handshaking<T, B>
+where
+    B: Buf,
+{
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        match *self {
+            Handshaking::Flushing(_) => f.write_str("Flushing(_)"),
+            Handshaking::ReadingPreface(_) => f.write_str("ReadingPreface(_)"),
+            Handshaking::Done => f.write_str("Done"),
+        }
+    }
+}
diff --git a/src/share.rs b/src/share.rs
new file mode 100644
index 0000000..26b4287
--- /dev/null
+++ b/src/share.rs
@@ -0,0 +1,606 @@
+use crate::codec::UserError;
+use crate::frame::Reason;
+use crate::proto::{self, WindowSize};
+
+use bytes::{Buf, Bytes};
+use http::HeaderMap;
+
+use std::fmt;
+#[cfg(feature = "stream")]
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Sends the body stream and trailers to the remote peer.
+///
+/// # Overview
+///
+/// A `SendStream` is provided by [`SendRequest`] and [`SendResponse`] once the
+/// HTTP/2 message header has been sent. It is used to stream the message
+/// body and send the message trailers. See method level documentation for more
+/// details.
+///
+/// The `SendStream` instance is also used to manage outbound flow control.
+///
+/// If a `SendStream` is dropped without explicitly closing the send stream, a
+/// `RST_STREAM` frame will be sent. This essentially cancels the request /
+/// response exchange.
+///
+/// The ways to explicitly close the send stream are:
+///
+/// * Set `end_of_stream` to true when calling [`send_request`],
+///   [`send_response`], or [`send_data`].
+/// * Send trailers with [`send_trailers`].
+/// * Explicitly reset the stream with [`send_reset`].
+///
+/// # Flow control
+///
+/// In HTTP/2, data cannot be sent to the remote peer unless there is
+/// available window capacity on both the stream and the connection. When a data
+/// frame is sent, both the stream window and the connection window are
+/// decremented. When the stream level window reaches zero, no further data can
+/// be sent on that stream. When the connection level window reaches zero, no
+/// further data can be sent on any stream for that connection.
+///
+/// When the remote peer is ready to receive more data, it sends `WINDOW_UPDATE`
+/// frames. These frames increment the windows. See the [specification] for more
+/// details on the principles of HTTP/2 flow control.
+///
+/// The implications for sending data are that the caller **should** ensure that
+/// both the stream and the connection have available window capacity before
+/// loading the data to send into memory. The `SendStream` instance provides the
+/// necessary APIs to perform this logic. Doing so, however, is not required.
+/// If the caller attempts to send data on a stream when there is no available
+/// window capacity, the library will buffer the data until capacity becomes
+/// available, at which point the buffer will be flushed to the connection.
+///
+/// **NOTE**: There is no bound on the amount of data that the library will
+/// buffer. If you are sending large amounts of data, you really should hook
+/// into the flow control lifecycle. Otherwise, you risk using up significant
+/// amounts of memory.
+///
+/// To hook into the flow control lifecycle, the caller signals to the library
+/// that it intends to send data by calling [`reserve_capacity`], specifying the
+/// amount of data, in octets, that the caller intends to send. After this,
+/// the caller uses `poll_capacity` to be notified when the requested capacity is
+/// assigned to the stream. Once [`poll_capacity`] returns `Ready` with the number
+/// of octets available to the stream, the caller is able to actually send the
+/// data using [`send_data`].
+///
+/// Because there is also a connection level window that applies to **all**
+/// streams on a connection, when capacity is assigned to a stream (indicated by
+/// `poll_capacity` returning `Ready`), this capacity is reserved on the
+/// connection and will **not** be assigned to any other stream. If data is
+/// never written to the stream, that capacity is effectively lost to other
+/// streams and this introduces the risk of deadlocking a connection.
+///
+/// To avoid throttling data on a connection, the caller should not reserve
+/// capacity until it is ready to send data; once any capacity is assigned to the
+/// stream, the caller should immediately send data consuming this capacity.
+/// There is no guarantee as to when the full capacity requested will become
+/// available. For example, if the caller requests 64 KB of data and 512 bytes
+/// become available, the caller should immediately send 512 bytes of data.
+///
+/// See [`reserve_capacity`] documentation for more details.
+///
+/// [`SendRequest`]: client/struct.SendRequest.html
+/// [`SendResponse`]: server/struct.SendResponse.html
+/// [specification]: http://httpwg.org/specs/rfc7540.html#FlowControl
+/// [`reserve_capacity`]: #method.reserve_capacity
+/// [`poll_capacity`]: #method.poll_capacity
+/// [`send_data`]: #method.send_data
+/// [`send_request`]: client/struct.SendRequest.html#method.send_request
+/// [`send_response`]: server/struct.SendResponse.html#method.send_response
+/// [`send_data`]: #method.send_data
+/// [`send_trailers`]: #method.send_trailers
+/// [`send_reset`]: #method.send_reset
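+///
+/// # Examples
+///
+/// A minimal sketch of the reserve / poll / send cycle described above. The
+/// 16 KB per-round-trip request is an illustrative choice, not a
+/// recommendation:
+///
+/// ```rust
+/// # use h2::SendStream;
+/// # use bytes::Bytes;
+/// # async fn doc(mut send_stream: SendStream<Bytes>, mut payload: Bytes) -> Result<(), h2::Error> {
+/// while !payload.is_empty() {
+///     // Express intent to send before pulling more data into memory.
+///     send_stream.reserve_capacity(payload.len().min(16 * 1024));
+///
+///     // Wait until the connection assigns some of that capacity to the stream.
+///     let assigned = futures_util::future::poll_fn(|cx| send_stream.poll_capacity(cx))
+///         .await
+///         .expect("send stream closed")?;
+///
+///     // Send no more than was assigned; the remainder stays in `payload`.
+///     let chunk = payload.split_to(assigned.min(payload.len()));
+///     let end_of_stream = payload.is_empty();
+///     send_stream.send_data(chunk, end_of_stream)?;
+/// }
+/// # Ok(())
+/// # }
+/// ```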
+#[derive(Debug)]
+pub struct SendStream<B> {
+    inner: proto::StreamRef<B>,
+}
+
+/// A stream identifier, as described in [Section 5.1.1] of RFC 7540.
+///
+/// Streams are identified with an unsigned 31-bit integer. Streams
+/// initiated by a client MUST use odd-numbered stream identifiers; those
+/// initiated by the server MUST use even-numbered stream identifiers.  A
+/// stream identifier of zero (0x0) is used for connection control
+/// messages; the stream identifier of zero cannot be used to establish a
+/// new stream.
+///
+/// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub struct StreamId(u32);
+
+impl From<StreamId> for u32 {
+    fn from(src: StreamId) -> Self {
+        src.0
+    }
+}
+
+/// Receives the body stream and trailers from the remote peer.
+///
+/// A `RecvStream` is provided by [`client::ResponseFuture`] and
+/// [`server::Connection`] with the received HTTP/2 message head (the response
+/// and request head respectively).
+///
+/// A `RecvStream` instance is used to receive the streaming message body and
+/// any trailers from the remote peer. It is also used to manage inbound flow
+/// control.
+///
+/// See method level documentation for more details on receiving data. See
+/// [`FlowControl`] for more details on inbound flow control.
+///
+/// [`client::ResponseFuture`]: client/struct.ResponseFuture.html
+/// [`server::Connection`]: server/struct.Connection.html
+/// [`FlowControl`]: struct.FlowControl.html
+/// [`Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html
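+///
+/// # Examples
+///
+/// A minimal sketch of draining a received body and then reading the
+/// trailers. Releasing the consumed capacity is elided here; see
+/// [`FlowControl`] for that part of the lifecycle:
+///
+/// ```rust
+/// # use h2::RecvStream;
+/// # async fn doc(mut body: RecvStream) -> Result<(), h2::Error> {
+/// while let Some(chunk) = body.data().await {
+///     let chunk = chunk?;
+///     println!("received {} bytes", chunk.len());
+/// }
+/// if let Some(trailers) = body.trailers().await? {
+///     println!("trailers: {:?}", trailers);
+/// }
+/// # Ok(())
+/// # }
+/// ```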
+#[must_use = "streams do nothing unless polled"]
+pub struct RecvStream {
+    inner: FlowControl,
+}
+
+/// A handle to release window capacity to a remote stream.
+///
+/// This type allows the caller to manage inbound data [flow control]. The
+/// caller is expected to call [`release_capacity`] after dropping data frames.
+///
+/// # Overview
+///
+/// Each stream has a window size. This window size is the maximum amount of
+/// inbound data that can be in-flight. In-flight data is defined as data that
+/// has been received, but not yet released.
+///
+/// When a stream is created, the window size is set to the connection's initial
+/// window size value. When a data frame is received, the window size is then
+/// decremented by the size of the data frame before the data is provided to the
+/// caller. As the caller finishes using the data, [`release_capacity`] must be
+/// called. This will then increment the window size again, allowing the peer to
+/// send more data.
+///
+/// In addition to the stream level window, there is also a connection level
+/// window. Received data counts against the connection level window as well,
+/// and calls to [`release_capacity`] will also increment it.
+///
+/// # Sending `WINDOW_UPDATE` frames
+///
+/// `WINDOW_UPDATE` frames will not be sent out for **every** call to
+/// `release_capacity`, as this would end up slowing down the protocol. Instead,
+/// `h2` waits until the window size is increased to a certain threshold and
+/// then sends out a single `WINDOW_UPDATE` frame representing all the calls to
+/// `release_capacity` since the last `WINDOW_UPDATE` frame.
+///
+/// This essentially batches window updating.
+///
+/// # Scenarios
+///
+/// Following is a basic scenario with an HTTP/2 connection containing a
+/// single active stream.
+///
+/// * A new stream is activated. The receive window is initialized to 1024 (the
+///   value of the initial window size for this connection).
+/// * A `DATA` frame is received containing a payload of 600 bytes.
+/// * The receive window size is reduced to 424 bytes.
+/// * [`release_capacity`] is called with 200.
+/// * The receive window size is now 624 bytes. The peer may send no more than
+///   this.
+/// * A `DATA` frame is received with a payload of 624 bytes.
+/// * The window size is now 0 bytes. The peer may not send any more data.
+/// * [`release_capacity`] is called with 1024.
+/// * The receive window size is now 1024 bytes. The peer may now send more
+///   data.
+///
+/// [flow control]: ../index.html#flow-control
+/// [`release_capacity`]: struct.FlowControl.html#method.release_capacity
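+///
+/// # Examples
+///
+/// A minimal sketch of the scenario above: capacity is released back to the
+/// peer as each received data frame is consumed. Cloning the handle first is
+/// just one way to keep it available while the body is polled:
+///
+/// ```rust
+/// # use h2::RecvStream;
+/// # async fn doc(mut body: RecvStream) -> Result<(), h2::Error> {
+/// let mut flow_control = body.flow_control().clone();
+///
+/// while let Some(chunk) = body.data().await {
+///     let chunk = chunk?;
+///     // Process `chunk` here, then tell the peer those bytes were consumed.
+///     let _ = flow_control.release_capacity(chunk.len());
+/// }
+/// # Ok(())
+/// # }
+/// ```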
+#[derive(Clone, Debug)]
+pub struct FlowControl {
+    inner: proto::OpaqueStreamRef,
+}
+
+/// A handle to send and receive PING frames with the peer.
+// NOT Clone on purpose
+pub struct PingPong {
+    inner: proto::UserPings,
+}
+
+/// Sent via [`PingPong`][] to send a PING frame to a peer.
+///
+/// [`PingPong`]: struct.PingPong.html
+pub struct Ping {
+    _p: (),
+}
+
+/// Received via [`PingPong`][] when a peer acknowledges a [`Ping`][].
+///
+/// [`PingPong`]: struct.PingPong.html
+/// [`Ping`]: struct.Ping.html
+pub struct Pong {
+    _p: (),
+}
+
+// ===== impl SendStream =====
+
+impl<B: Buf> SendStream<B> {
+    pub(crate) fn new(inner: proto::StreamRef<B>) -> Self {
+        SendStream { inner }
+    }
+
+    /// Requests capacity to send data.
+    ///
+    /// This function is used to express intent to send data. This requests
+    /// connection level capacity. Once the capacity is available, it is
+    /// assigned to the stream and not reused by other streams.
+    ///
+    /// This function may be called repeatedly. The `capacity` argument is the
+    /// **total** amount of requested capacity. Sequential calls to
+    /// `reserve_capacity` are *not* additive. Given the following:
+    ///
+    /// ```rust
+    /// # use h2::*;
+    /// # fn doc(mut send_stream: SendStream<&'static [u8]>) {
+    /// send_stream.reserve_capacity(100);
+    /// send_stream.reserve_capacity(200);
+    /// # }
+    /// ```
+    ///
+    /// After the second call to `reserve_capacity`, the *total* requested
+    /// capacity will be 200.
+    ///
+    /// `reserve_capacity` is also used to cancel previous capacity requests.
+    /// Given the following:
+    ///
+    /// ```rust
+    /// # use h2::*;
+    /// # fn doc(mut send_stream: SendStream<&'static [u8]>) {
+    /// send_stream.reserve_capacity(100);
+    /// send_stream.reserve_capacity(0);
+    /// # }
+    /// ```
+    ///
+    /// After the second call to `reserve_capacity`, the *total* requested
+    /// capacity will be 0, i.e. there is no requested capacity for the stream.
+    ///
+    /// If `reserve_capacity` is called with a lower value than the amount of
+    /// capacity **currently** assigned to the stream, this capacity will be
+    /// returned to the connection to be re-assigned to other streams.
+    ///
+    /// Also, the amount of capacity that is reserved gets decremented as data
+    /// is sent. For example:
+    ///
+    /// ```rust
+    /// # use h2::*;
+    /// # async fn doc(mut send_stream: SendStream<&'static [u8]>) {
+    /// send_stream.reserve_capacity(100);
+    ///
+    /// send_stream.send_data(b"hello", false).unwrap();
+    /// // At this point, the total amount of requested capacity is 95 bytes.
+    ///
+    /// // Calling `reserve_capacity` with `100` again essentially requests an
+    /// // additional 5 bytes.
+    /// send_stream.reserve_capacity(100);
+    /// # }
+    /// ```
+    ///
+    /// See [Flow control](struct.SendStream.html#flow-control) for an overview
+    /// of how send flow control works.
+    pub fn reserve_capacity(&mut self, capacity: usize) {
+        // TODO: Check for overflow
+        self.inner.reserve_capacity(capacity as WindowSize)
+    }
+
+    /// Returns the stream's current send capacity.
+    ///
+    /// This allows the caller to check the current amount of available capacity
+    /// before sending data.
+    pub fn capacity(&self) -> usize {
+        self.inner.capacity() as usize
+    }
+
+    /// Requests to be notified when the stream's capacity increases.
+    ///
+    /// Before calling this, capacity should be requested with
+    /// `reserve_capacity`. Once capacity is requested, the connection will
+    /// assign capacity to the stream **as it becomes available**. There is no
+    /// guarantee as to when and in what increments capacity gets assigned to
+    /// the stream.
+    ///
+    /// To get notified when the available capacity increases, the caller calls
+    /// `poll_capacity`, which returns `Ready(Some(n))` once the connection has
+    /// assigned additional capacity. Note that `n` here represents the **total**
+    /// amount of assigned capacity at that point in time. It is also possible
+    /// that `n` is lower than the previous call if, since then, the caller has
+    /// sent data.
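+    ///
+    /// A minimal sketch; the 100 byte request is purely illustrative:
+    ///
+    /// ```rust
+    /// # use h2::SendStream;
+    /// # use std::task::{Context, Poll};
+    /// # fn doc(mut send_stream: SendStream<&'static [u8]>, cx: &mut Context<'_>) {
+    /// send_stream.reserve_capacity(100);
+    ///
+    /// if let Poll::Ready(Some(Ok(assigned))) = send_stream.poll_capacity(cx) {
+    ///     // `assigned` is the total capacity currently assigned to the stream,
+    ///     // which may be less than the 100 bytes requested.
+    ///     let _ = assigned;
+    /// }
+    /// # }
+    /// ```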
+    pub fn poll_capacity(&mut self, cx: &mut Context) -> Poll<Option<Result<usize, crate::Error>>> {
+        self.inner
+            .poll_capacity(cx)
+            .map_ok(|w| w as usize)
+            .map_err(Into::into)
+    }
+
+    /// Sends a single data frame to the remote peer.
+    ///
+    /// This function may be called repeatedly as long as `end_of_stream` is set
+    /// to `false`. Setting `end_of_stream` to `true` sets the end stream flag
+    /// on the data frame. Any further calls to `send_data` or `send_trailers`
+    /// will return an [`Error`].
+    ///
+    /// `send_data` can be called without reserving capacity. In this case, the
+    /// data is buffered and the capacity is implicitly requested. Once the
+    /// capacity becomes available, the data is flushed to the connection.
+    /// However, this buffering is unbounded. As such, sending large amounts of
+    /// data without reserving capacity beforehand could result in large
+    /// amounts of data being buffered in memory.
+    ///
+    /// [`Error`]: struct.Error.html
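+    ///
+    /// A minimal sketch sending two illustrative frames, the second of which
+    /// sets the end-of-stream flag:
+    ///
+    /// ```rust
+    /// # use h2::SendStream;
+    /// # fn doc(mut send_stream: SendStream<&'static [u8]>) -> Result<(), h2::Error> {
+    /// send_stream.send_data(b"hello, ", false)?;
+    /// send_stream.send_data(b"world", true)?;
+    /// # Ok(())
+    /// # }
+    /// ```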
+    pub fn send_data(&mut self, data: B, end_of_stream: bool) -> Result<(), crate::Error> {
+        self.inner
+            .send_data(data, end_of_stream)
+            .map_err(Into::into)
+    }
+
+    /// Sends trailers to the remote peer.
+    ///
+    /// Sending trailers implicitly closes the send stream. Once the send stream
+    /// is closed, no more data can be sent.
+    pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), crate::Error> {
+        self.inner.send_trailers(trailers).map_err(Into::into)
+    }
+
+    /// Resets the stream.
+    ///
+    /// This cancels the request / response exchange. If the response has not
+    /// yet been received, the associated `ResponseFuture` will return an
+    /// [`Error`] to reflect the canceled exchange.
+    ///
+    /// [`Error`]: struct.Error.html
+    pub fn send_reset(&mut self, reason: Reason) {
+        self.inner.send_reset(reason)
+    }
+
+    /// Polls to be notified when the client resets this stream.
+    ///
+    /// If the stream is still open, this returns `Poll::Pending`, and
+    /// registers the task to be notified if a `RST_STREAM` is received.
+    ///
+    /// If a `RST_STREAM` frame is received for this stream, calling this
+    /// method will yield the `Reason` for the reset.
+    ///
+    /// # Error
+    ///
+    /// If the connection sees an error, this returns that error instead of a
+    /// `Reason`.
+    pub fn poll_reset(&mut self, cx: &mut Context) -> Poll<Result<Reason, crate::Error>> {
+        self.inner.poll_reset(cx, proto::PollReset::Streaming)
+    }
+
+    /// Returns the stream ID of this `SendStream`.
+    ///
+    /// # Panics
+    ///
+    /// If the lock on the stream store has been poisoned.
+    pub fn stream_id(&self) -> StreamId {
+        StreamId::from_internal(self.inner.stream_id())
+    }
+}
+
+// ===== impl StreamId =====
+
+impl StreamId {
+    pub(crate) fn from_internal(id: crate::frame::StreamId) -> Self {
+        StreamId(id.into())
+    }
+
+    /// Returns the `u32` corresponding to this `StreamId`
+    ///
+    /// # Note
+    ///
+    /// This is the same as the `From<StreamId>` implementation, but
+    /// included as an inherent method because that implementation doesn't
+    /// appear in rustdocs, and as a way to force the type instead of
+    /// relying on inference.
+    pub fn as_u32(&self) -> u32 {
+        (*self).into()
+    }
+}
+
+// ===== impl RecvStream =====
+
+impl RecvStream {
+    pub(crate) fn new(inner: FlowControl) -> Self {
+        RecvStream { inner }
+    }
+
+    /// Get the next data frame.
+    pub async fn data(&mut self) -> Option<Result<Bytes, crate::Error>> {
+        futures_util::future::poll_fn(move |cx| self.poll_data(cx)).await
+    }
+
+    /// Get optional trailers for this stream.
+    pub async fn trailers(&mut self) -> Result<Option<HeaderMap>, crate::Error> {
+        futures_util::future::poll_fn(move |cx| self.poll_trailers(cx)).await
+    }
+
+    /// Poll for the next data frame.
+    pub fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, crate::Error>>> {
+        self.inner.inner.poll_data(cx).map_err(Into::into)
+    }
+
+    #[doc(hidden)]
+    pub fn poll_trailers(
+        &mut self,
+        cx: &mut Context,
+    ) -> Poll<Result<Option<HeaderMap>, crate::Error>> {
+        match ready!(self.inner.inner.poll_trailers(cx)) {
+            Some(Ok(map)) => Poll::Ready(Ok(Some(map))),
+            Some(Err(e)) => Poll::Ready(Err(e.into())),
+            None => Poll::Ready(Ok(None)),
+        }
+    }
+
+    /// Returns true if the receive half has reached the end of stream.
+    ///
+    /// A return value of `true` means that calls to `poll_data` and `poll_trailers`
+    /// will both return `None`.
+    pub fn is_end_stream(&self) -> bool {
+        self.inner.inner.is_end_stream()
+    }
+
+    /// Get a mutable reference to this stream's `FlowControl`.
+    ///
+    /// It can be used immediately, or cloned to be used later.
+    pub fn flow_control(&mut self) -> &mut FlowControl {
+        &mut self.inner
+    }
+
+    /// Returns the stream ID of this stream.
+    ///
+    /// # Panics
+    ///
+    /// If the lock on the stream store has been poisoned.
+    pub fn stream_id(&self) -> StreamId {
+        self.inner.stream_id()
+    }
+}
+
+#[cfg(feature = "stream")]
+impl futures_core::Stream for RecvStream {
+    type Item = Result<Bytes, crate::Error>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.poll_data(cx)
+    }
+}
+
+impl fmt::Debug for RecvStream {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("RecvStream")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
+impl Drop for RecvStream {
+    fn drop(&mut self) {
+        // Eagerly clear any received DATA frames now, since it's no longer
+        // possible to retrieve them. However, this will be called
+        // again once *all* stream refs have been dropped, since
+        // this won't send a RST_STREAM frame, in case the user wishes to
+        // still *send* DATA.
+        self.inner.inner.clear_recv_buffer();
+    }
+}
+
+// ===== impl FlowControl =====
+
+impl FlowControl {
+    pub(crate) fn new(inner: proto::OpaqueStreamRef) -> Self {
+        FlowControl { inner }
+    }
+
+    /// Returns the stream ID of the stream whose capacity will
+    /// be released by this `FlowControl`.
+    pub fn stream_id(&self) -> StreamId {
+        StreamId::from_internal(self.inner.stream_id())
+    }
+
+    /// Get the current available capacity of data this stream *could* receive.
+    pub fn available_capacity(&self) -> isize {
+        self.inner.available_recv_capacity()
+    }
+
+    /// Get the currently *used* capacity for this stream.
+    ///
+    /// This is the number of bytes that can be released back to the remote.
+    pub fn used_capacity(&self) -> usize {
+        self.inner.used_recv_capacity() as usize
+    }
+
+    /// Release window capacity back to remote stream.
+    ///
+    /// This releases capacity back to the stream level and the connection level
+    /// windows. Both window sizes will be increased by `sz`.
+    ///
+    /// See [struct level] documentation for more details.
+    ///
+    /// # Errors
+    ///
+    /// This function errors if increasing the receive window size by `sz` would
+    /// result in a window size greater than the target window size. In other
+    /// words, the caller cannot release more capacity than the amount of data
+    /// received. If 1024 bytes of data have been received, at most 1024 bytes
+    /// can be released.
+    ///
+    /// [struct level]: #
+    pub fn release_capacity(&mut self, sz: usize) -> Result<(), crate::Error> {
+        if sz > proto::MAX_WINDOW_SIZE as usize {
+            return Err(UserError::ReleaseCapacityTooBig.into());
+        }
+        self.inner
+            .release_capacity(sz as proto::WindowSize)
+            .map_err(Into::into)
+    }
+}
+
+// ===== impl PingPong =====
+
+impl PingPong {
+    pub(crate) fn new(inner: proto::UserPings) -> Self {
+        PingPong { inner }
+    }
+
+    /// Send a PING frame and wait for the peer to send the pong.
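+    ///
+    /// A minimal sketch, assuming a `PingPong` handle already obtained from a
+    /// client or server connection:
+    ///
+    /// ```rust
+    /// # use h2::{Ping, PingPong};
+    /// # async fn doc(mut ping_pong: PingPong) -> Result<(), h2::Error> {
+    /// let _pong = ping_pong.ping(Ping::opaque()).await?;
+    /// # Ok(())
+    /// # }
+    /// ```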
+    pub async fn ping(&mut self, ping: Ping) -> Result<Pong, crate::Error> {
+        self.send_ping(ping)?;
+        futures_util::future::poll_fn(|cx| self.poll_pong(cx)).await
+    }
+
+    #[doc(hidden)]
+    pub fn send_ping(&mut self, ping: Ping) -> Result<(), crate::Error> {
+        // Passing a `Ping` here is just to be forwards-compatible with
+        // eventually allowing choosing a ping payload. For now, we can
+        // just ignore it.
+        let _ = ping;
+
+        self.inner.send_ping().map_err(|err| match err {
+            Some(err) => err.into(),
+            None => UserError::SendPingWhilePending.into(),
+        })
+    }
+
+    #[doc(hidden)]
+    pub fn poll_pong(&mut self, cx: &mut Context) -> Poll<Result<Pong, crate::Error>> {
+        ready!(self.inner.poll_pong(cx))?;
+        Poll::Ready(Ok(Pong { _p: () }))
+    }
+}
+
+impl fmt::Debug for PingPong {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("PingPong").finish()
+    }
+}
+
+// ===== impl Ping =====
+
+impl Ping {
+    /// Creates a new opaque `Ping` to be sent via a [`PingPong`][].
+    ///
+    /// The payload is "opaque", so it shouldn't be depended on.
+    ///
+    /// [`PingPong`]: struct.PingPong.html
+    pub fn opaque() -> Ping {
+        Ping { _p: () }
+    }
+}
+
+impl fmt::Debug for Ping {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Ping").finish()
+    }
+}
+
+// ===== impl Pong =====
+
+impl fmt::Debug for Pong {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Pong").finish()
+    }
+}