components: Implement Codec2.0 V4L2 decode component

This CL implements V4L2DecodeComponent, a decode C2Component via V4L2
API. It mainly contains these parts:

- VideoDecoder:
  An interface for video decoding, similar to media::VideoDecoder in
the Chromium project.
- V4L2Decoder:
  An implementation of VideoDecoder via V4L2 stateful API. We wrap all
  V4L2-related logic in this class.

- VideoFrame:
  The data structure to represent the output buffer of VideoDecoder.
  It encapsulates a C2GraphicBlock and only exposes the essential
  information of physical buffers.
- VideoFramePool:
  The buffer pool used by VideoDecoder. It encapsulates C2BlockPool
  and provides an asynchronous method to fetch buffers.

- V4L2DecodeComponent, V4L2DecodeInterface:
  The C2Component implementation that delegates the decode request to
  V4L2Decoder.
- V4L2ComponentFactory:
  The C2ComponentFactory that creates V4L2DecodeComponent.

Bug: 152714603
Test: mmm external/v4l2_codec2/
Test: Run e2e test and make sure the V4L2DecodeComponent is running

Change-Id: I23e9b3553b62dd6c83d943d7787297305372c37d
diff --git a/components/Android.bp b/components/Android.bp
new file mode 100644
index 0000000..19dc794
--- /dev/null
+++ b/components/Android.bp
@@ -0,0 +1,43 @@
+// Library containing the Codec2.0 component implementations backed by the
+// V4L2 API (decoder path), exported for use by the codec2 service.
+cc_library {
+    name: "libv4l2_codec2_components",
+    // Runs in the vendor partition next to the V4L2 device nodes.
+    vendor: true,
+
+    defaults: [
+        "libcodec2-impl-defaults",
+    ],
+
+    srcs: [
+        "VideoFrame.cpp",
+        "VideoFramePool.cpp",
+        "V4L2Decoder.cpp",
+        "V4L2ComponentFactory.cpp",
+        "V4L2DecodeComponent.cpp",
+        "V4L2DecodeInterface.cpp",
+        "VideoDecoder.cpp",
+        "VideoTypes.cpp",
+    ],
+    // Headers under include/ are the public interface of this library.
+    export_include_dirs: [
+        "include",
+    ],
+
+    shared_libs: [
+        "[email protected]",
+        "libchrome",
+        "libcodec2_soft_common",
+        "libcutils",
+        "liblog",
+        "libsfplugin_ccodec_utils",
+        "libstagefright_foundation",
+        "libv4l2_codec2_store",
+    ],
+    static_libs: [
+        "libv4l2_codec2_accel",
+        "libv4l2_codec2_common",
+    ],
+
+    cflags: [
+      "-Werror",
+      "-Wall",
+      "-Wno-unused-parameter",  // needed for libchrome/base codes
+    ],
+}
diff --git a/components/V4L2ComponentFactory.cpp b/components/V4L2ComponentFactory.cpp
new file mode 100644
index 0000000..1dca7ff
--- /dev/null
+++ b/components/V4L2ComponentFactory.cpp
@@ -0,0 +1,112 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "V4L2ComponentFactory"
+
+#include <string>
+
+#include <C2ComponentFactory.h>
+#include <SimpleC2Interface.h>
+#include <log/log.h>
+#include <util/C2InterfaceHelper.h>
+
+#include <v4l2_codec2/common/V4L2ComponentCommon.h>
+#include <v4l2_codec2/components/V4L2DecodeComponent.h>
+#include <v4l2_codec2/components/V4L2DecodeInterface.h>
+#include <v4l2_codec2/store/V4L2ComponentStore.h>
+
+namespace android {
+
+// C2ComponentFactory implementation that creates V4L2-backed components and
+// interfaces for a single, fixed component name. One factory instance serves
+// exactly one component name, decided at construction time.
+class V4L2ComponentFactory : public C2ComponentFactory {
+public:
+    V4L2ComponentFactory(const char* componentName, bool isEncoder);
+    ~V4L2ComponentFactory() override;
+
+    // Implementation of C2ComponentFactory.
+    c2_status_t createComponent(c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+                                ComponentDeleter deleter) override;
+    c2_status_t createInterface(c2_node_id_t id,
+                                std::shared_ptr<C2ComponentInterface>* const interface,
+                                InterfaceDeleter deleter) override;
+
+private:
+    // The fixed component name this factory creates (e.g. a decoder name).
+    const std::string mComponentName;
+    // True if |mComponentName| names an encoder; encoders are not supported yet.
+    const bool mIsEncoder;
+    // Shared parameter reflector obtained from V4L2ComponentStore; may be null
+    // if the store failed to initialize (checked in create* methods).
+    std::shared_ptr<C2ReflectorHelper> mReflector;
+};
+
+V4L2ComponentFactory::V4L2ComponentFactory(const char* componentName, bool isEncoder)
+      : mComponentName(componentName), mIsEncoder(isEncoder) {
+    // Fetch the shared parameter reflector from the component store. On
+    // failure |mReflector| stays null and createComponent()/createInterface()
+    // will report C2_CORRUPTED instead of crashing here.
+    auto componentStore = V4L2ComponentStore::Create();
+    if (componentStore == nullptr) {
+        ALOGE("Could not create V4L2ComponentStore.");
+        return;
+    }
+    mReflector = std::static_pointer_cast<C2ReflectorHelper>(componentStore->getParamReflector());
+}
+
+V4L2ComponentFactory::~V4L2ComponentFactory() = default;
+
+// Creates a V4L2DecodeComponent for |id|. Fails with C2_CORRUPTED if the
+// reflector was not obtained at construction, and with C2_BAD_VALUE for
+// encoder names (not implemented yet) or if component creation fails.
+c2_status_t V4L2ComponentFactory::createComponent(c2_node_id_t id,
+                                                  std::shared_ptr<C2Component>* const component,
+                                                  ComponentDeleter deleter) {
+    ALOGV("%s(%d), componentName: %s, isEncoder: %d", __func__, id, mComponentName.c_str(),
+          mIsEncoder);
+
+    if (mReflector == nullptr) {
+        ALOGE("mReflector doesn't exist.");
+        return C2_CORRUPTED;
+    }
+
+    if (mIsEncoder) {
+        // TODO(b/143333813): Fill the encoder component.
+        return C2_BAD_VALUE;
+    } else {
+        *component = V4L2DecodeComponent::create(mComponentName, id, mReflector, deleter);
+        return *component ? C2_OK : C2_BAD_VALUE;
+    }
+}
+
+// Creates a standalone component interface (no component) so the framework can
+// query capabilities without instantiating a decoder. Mirrors the error
+// handling of createComponent().
+c2_status_t V4L2ComponentFactory::createInterface(
+        c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+        InterfaceDeleter deleter) {
+    ALOGV("%s(), componentName: %s", __func__, mComponentName.c_str());
+
+    if (mReflector == nullptr) {
+        ALOGE("mReflector doesn't exist.");
+        return C2_CORRUPTED;
+    }
+
+    if (mIsEncoder) {
+        // TODO(b/143333813): Fill the encoder component.
+        return C2_BAD_VALUE;
+    } else {
+        // Wrap the V4L2DecodeInterface in a SimpleInterface; the custom
+        // |deleter| provided by the framework owns the destruction.
+        *interface = std::shared_ptr<C2ComponentInterface>(
+                new SimpleInterface<V4L2DecodeInterface>(
+                        mComponentName.c_str(), id,
+                        std::make_shared<V4L2DecodeInterface>(mComponentName, mReflector)),
+                deleter);
+        return C2_OK;
+    }
+}
+
+}  // namespace android
+
+// C entry points resolved by the Codec2 framework when loading this library.
+// Returns a heap-allocated factory for |componentName|, or nullptr if the name
+// is not one of the V4L2 component names. Ownership passes to the caller, who
+// must release it via DestroyCodec2Factory().
+extern "C" ::C2ComponentFactory* CreateCodec2Factory(const char* componentName) {
+    ALOGV("%s(%s)", __func__, componentName);
+
+    if (!android::V4L2ComponentName::isValid(componentName)) {
+        ALOGE("Invalid component name: %s", componentName);
+        return nullptr;
+    }
+
+    bool isEncoder = android::V4L2ComponentName::isEncoder(componentName);
+    return new android::V4L2ComponentFactory(componentName, isEncoder);
+}
+
+// Counterpart of CreateCodec2Factory(); deletes the factory (nullptr-safe).
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory* factory) {
+    ALOGV("%s()", __func__);
+    delete factory;
+}
diff --git a/components/V4L2DecodeComponent.cpp b/components/V4L2DecodeComponent.cpp
new file mode 100644
index 0000000..64cb4ba
--- /dev/null
+++ b/components/V4L2DecodeComponent.cpp
@@ -0,0 +1,834 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "V4L2DecodeComponent"
+
+#include <v4l2_codec2/components/V4L2DecodeComponent.h>
+
+#include <inttypes.h>
+#include <linux/videodev2.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include <C2.h>
+#include <C2PlatformSupport.h>
+#include <Codec2Mapper.h>
+#include <SimpleC2Interface.h>
+#include <base/bind.h>
+#include <base/callback_helpers.h>
+#include <base/time/time.h>
+#include <log/log.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+
+#include <h264_parser.h>
+#include <v4l2_codec2/components/V4L2Decoder.h>
+#include <v4l2_codec2/components/VideoFramePool.h>
+#include <v4l2_codec2/components/VideoTypes.h>
+
+namespace android {
+namespace {
+// TODO(b/151128291): figure out why we cannot open V4L2Device in 0.5 second?
+const ::base::TimeDelta kBlockingMethodTimeout = ::base::TimeDelta::FromMilliseconds(5000);
+
+// Mask against 30 bits to avoid (undefined) wraparound on signed integer.
+int32_t frameIndexToBitstreamId(c2_cntr64_t frameIndex) {
+    return static_cast<int32_t>(frameIndex.peeku() & 0x3FFFFFFF);
+}
+
+std::unique_ptr<VideoDecoder::BitstreamBuffer> C2BlockToBitstreamBuffer(
+        const C2ConstLinearBlock& block, const int32_t bitstreamId) {
+    const int fd = block.handle()->data[0];
+    auto dupFd = ::base::ScopedFD(dup(fd));
+    if (!dupFd.is_valid()) {
+        ALOGE("Failed to dup(%d) input buffer (bitstreamId=%d), errno=%d", fd, bitstreamId, errno);
+        return nullptr;
+    }
+
+    return std::make_unique<VideoDecoder::BitstreamBuffer>(bitstreamId, std::move(dupFd),
+                                                           block.offset(), block.size());
+}
+
+bool parseCodedColorAspects(const C2ConstLinearBlock& input,
+                            C2StreamColorAspectsInfo::input* codedAspects) {
+    C2ReadView view = input.map().get();
+    const uint8_t* data = view.data();
+    const uint32_t size = view.capacity();
+
+    std::unique_ptr<media::H264Parser> h264Parser = std::make_unique<media::H264Parser>();
+    h264Parser->SetStream(data, static_cast<off_t>(size));
+    media::H264NALU nalu;
+    media::H264Parser::Result parRes = h264Parser->AdvanceToNextNALU(&nalu);
+    if (parRes != media::H264Parser::kEOStream && parRes != media::H264Parser::kOk) {
+        ALOGE("H264 AdvanceToNextNALU error: %d", static_cast<int>(parRes));
+        return false;
+    }
+    if (nalu.nal_unit_type != media::H264NALU::kSPS) {
+        ALOGV("NALU is not SPS");
+        return false;
+    }
+
+    int spsId;
+    parRes = h264Parser->ParseSPS(&spsId);
+    if (parRes != media::H264Parser::kEOStream && parRes != media::H264Parser::kOk) {
+        ALOGE("H264 ParseSPS error: %d", static_cast<int>(parRes));
+        return false;
+    }
+
+    // Parse ISO color aspects from H264 SPS bitstream.
+    const media::H264SPS* sps = h264Parser->GetSPS(spsId);
+    if (!sps->colour_description_present_flag) {
+        ALOGV("No Color Description in SPS");
+        return false;
+    }
+    int32_t primaries = sps->colour_primaries;
+    int32_t transfer = sps->transfer_characteristics;
+    int32_t coeffs = sps->matrix_coefficients;
+    bool fullRange = sps->video_full_range_flag;
+
+    // Convert ISO color aspects to ColorUtils::ColorAspects.
+    ColorAspects colorAspects;
+    ColorUtils::convertIsoColorAspectsToCodecAspects(primaries, transfer, coeffs, fullRange,
+                                                     colorAspects);
+    ALOGV("Parsed ColorAspects from bitstream: (R:%d, P:%d, M:%d, T:%d)", colorAspects.mRange,
+          colorAspects.mPrimaries, colorAspects.mMatrixCoeffs, colorAspects.mTransfer);
+
+    // Map ColorUtils::ColorAspects to C2StreamColorAspectsInfo::input parameter.
+    if (!C2Mapper::map(colorAspects.mPrimaries, &codedAspects->primaries)) {
+        codedAspects->primaries = C2Color::PRIMARIES_UNSPECIFIED;
+    }
+    if (!C2Mapper::map(colorAspects.mRange, &codedAspects->range)) {
+        codedAspects->range = C2Color::RANGE_UNSPECIFIED;
+    }
+    if (!C2Mapper::map(colorAspects.mMatrixCoeffs, &codedAspects->matrix)) {
+        codedAspects->matrix = C2Color::MATRIX_UNSPECIFIED;
+    }
+    if (!C2Mapper::map(colorAspects.mTransfer, &codedAspects->transfer)) {
+        codedAspects->transfer = C2Color::TRANSFER_UNSPECIFIED;
+    }
+
+    return true;
+}
+
+bool isWorkDone(const C2Work& work) {
+    const int32_t bitstreamId = frameIndexToBitstreamId(work.input.ordinal.frameIndex);
+
+    // Exception: EOS work should be processed by reportEOSWork().
+    // Always return false here no matter the work is actually done.
+    if (work.input.flags & C2FrameData::FLAG_END_OF_STREAM) return false;
+
+    // Work is done when all conditions meet:
+    // 1. mDecoder has released the work's input buffer.
+    // 2. mDecoder has returned the work's output buffer in normal case,
+    //    or the input buffer is CSD, or we decide to drop the frame.
+    bool inputReleased = (work.input.buffers.front() == nullptr);
+    bool outputReturned = !work.worklets.front()->output.buffers.empty();
+    bool ignoreOutput = (work.input.flags & C2FrameData::FLAG_CODEC_CONFIG) ||
+                        (work.worklets.front()->output.flags & C2FrameData::FLAG_DROP_FRAME);
+    ALOGV("work(%d): inputReleased: %d, outputReturned: %d, ignoreOutput: %d", bitstreamId,
+          inputReleased, outputReturned, ignoreOutput);
+    return inputReleased && (outputReturned || ignoreOutput);
+}
+
+bool isNoShowFrameWork(const C2Work& work, const C2WorkOrdinalStruct& currOrdinal) {
+    // We consider Work contains no-show frame when all conditions meet:
+    // 1. Work's ordinal is smaller than current ordinal.
+    // 2. Work's output buffer is not returned.
+    // 3. Work is not EOS, CSD, or marked with dropped frame.
+    bool smallOrdinal = (work.input.ordinal.timestamp < currOrdinal.timestamp) &&
+                        (work.input.ordinal.frameIndex < currOrdinal.frameIndex);
+    bool outputReturned = !work.worklets.front()->output.buffers.empty();
+    bool specialWork = (work.input.flags & C2FrameData::FLAG_END_OF_STREAM) ||
+                       (work.input.flags & C2FrameData::FLAG_CODEC_CONFIG) ||
+                       (work.worklets.front()->output.flags & C2FrameData::FLAG_DROP_FRAME);
+    return smallOrdinal && !outputReturned && !specialWork;
+}
+
+}  // namespace
+
+// static
+std::shared_ptr<C2Component> V4L2DecodeComponent::create(
+        const std::string& name, c2_node_id_t id, const std::shared_ptr<C2ReflectorHelper>& helper,
+        C2ComponentFactory::ComponentDeleter deleter) {
+    auto intfImpl = std::make_shared<V4L2DecodeInterface>(name, helper);
+    if (intfImpl->status() != C2_OK) {
+        ALOGE("Failed to initialize V4L2DecodeInterface.");
+        return nullptr;
+    }
+
+    return std::shared_ptr<C2Component>(new V4L2DecodeComponent(name, id, helper, intfImpl),
+                                        deleter);
+}
+
+// Private constructor; use create() which validates the interface first.
+V4L2DecodeComponent::V4L2DecodeComponent(const std::string& name, c2_node_id_t id,
+                                         const std::shared_ptr<C2ReflectorHelper>& helper,
+                                         const std::shared_ptr<V4L2DecodeInterface>& intfImpl)
+      : mIntfImpl(intfImpl),
+        mIntf(std::make_shared<SimpleInterface<V4L2DecodeInterface>>(name.c_str(), id, mIntfImpl)) {
+    ALOGV("%s(%s)", __func__, name.c_str());
+
+    // Secure playback is detected from the component name suffix.
+    mIsSecure = name.find(".secure") != std::string::npos;
+    // TODO(b/153608694): Support secure mode.
+    ALOG_ASSERT(!mIsSecure, "Secure mode is not supported yet.");
+}
+
+V4L2DecodeComponent::~V4L2DecodeComponent() {
+    ALOGV("%s()", __func__);
+
+    // If the decoder thread is still alive (e.g. release without stop()),
+    // tear down decoder-thread state on that thread, then join it. Stop()
+    // blocks until all pending tasks, including destroyTask(), have run.
+    if (mDecoderThread.IsRunning()) {
+        mDecoderTaskRunner->PostTask(
+                FROM_HERE, ::base::BindOnce(&V4L2DecodeComponent::destroyTask, mWeakThis));
+        mDecoderThread.Stop();
+    }
+    ALOGV("%s() done", __func__);
+}
+
+// Runs on the decoder thread: invalidates weak pointers so no queued callback
+// can fire afterwards, then destroys the decoder on its owning thread.
+void V4L2DecodeComponent::destroyTask() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    mWeakThisFactory.InvalidateWeakPtrs();
+    mDecoder = nullptr;
+}
+
+// C2Component::start(): spins up the decoder thread and synchronously waits
+// (with kBlockingMethodTimeout) for startTask() to create the V4L2 decoder.
+// Only legal from STOPPED; moves the component to RUNNING on success.
+c2_status_t V4L2DecodeComponent::start() {
+    ALOGV("%s()", __func__);
+    // Serialize start() against stop().
+    std::lock_guard<std::mutex> lock(mStartStopLock);
+
+    auto currentState = mComponentState.load();
+    if (currentState != ComponentState::STOPPED) {
+        ALOGE("Could not start at %s state", ComponentStateToString(currentState));
+        return C2_BAD_STATE;
+    }
+
+    if (!mDecoderThread.Start()) {
+        ALOGE("Decoder thread failed to start.");
+        return C2_CORRUPTED;
+    }
+    mDecoderTaskRunner = mDecoderThread.task_runner();
+    mWeakThis = mWeakThisFactory.GetWeakPtr();
+
+    // |status| is filled by startTask() on the decoder thread before it
+    // signals |mStartStopDone|; Unretained is safe because we block below.
+    c2_status_t status = C2_CORRUPTED;
+    mStartStopDone.Reset();
+    mDecoderTaskRunner->PostTask(FROM_HERE,
+                                 ::base::BindOnce(&V4L2DecodeComponent::startTask, mWeakThis,
+                                                  ::base::Unretained(&status)));
+    if (!mStartStopDone.TimedWait(kBlockingMethodTimeout)) {
+        ALOGE("startTask() timeout...");
+        return C2_TIMED_OUT;
+    }
+
+    if (status == C2_OK) mComponentState.store(ComponentState::RUNNING);
+    return status;
+}
+
+// Decoder-thread half of start(): creates the V4L2Decoder and primes the
+// color-aspects state. Writes the result into |*status| and always signals
+// |mStartStopDone| on exit (via the ScopedClosureRunner), even on early return.
+void V4L2DecodeComponent::startTask(c2_status_t* status) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    ::base::ScopedClosureRunner done_caller(
+            ::base::BindOnce(&::base::WaitableEvent::Signal, ::base::Unretained(&mStartStopDone)));
+    *status = C2_CORRUPTED;
+
+    const auto codec = mIntfImpl->getVideoCodec();
+    if (!codec) {
+        ALOGE("Failed to get video codec.");
+        return;
+    }
+    const size_t inputBufferSize = mIntfImpl->getInputBufferSize();
+    // All decoder callbacks are bound to |mWeakThis| and run on the decoder
+    // task runner; they become no-ops once the weak pointers are invalidated.
+    mDecoder = V4L2Decoder::Create(
+            *codec, inputBufferSize,
+            ::base::BindRepeating(&V4L2DecodeComponent::getVideoFramePool, mWeakThis),
+            ::base::BindRepeating(&V4L2DecodeComponent::onOutputFrameReady, mWeakThis),
+            ::base::BindRepeating(&V4L2DecodeComponent::reportError, mWeakThis, C2_CORRUPTED),
+            mDecoderTaskRunner);
+    if (!mDecoder) {
+        ALOGE("Failed to create V4L2Decoder for %s", VideoCodecToString(*codec));
+        return;
+    }
+
+    // Get default color aspects on start.
+    if (!mIsSecure && *codec == VideoCodec::H264) {
+        if (mIntfImpl->queryColorAspects(&mCurrentColorAspects) != C2_OK) return;
+        mPendingColorAspectsChange = false;
+    }
+
+    *status = C2_OK;
+}
+
+// Decoder callback: builds a VideoFramePool over the client-configured
+// C2BlockPool for output buffers of |size|/|pixelFormat|. On failure, sets
+// |*pool| to nullptr and reports the error to the listener.
+void V4L2DecodeComponent::getVideoFramePool(std::unique_ptr<VideoFramePool>* pool,
+                                            const media::Size& size, HalPixelFormat pixelFormat) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // Get block pool ID configured from the client.
+    auto poolId = mIntfImpl->getBlockPoolId();
+    ALOGI("Using C2BlockPool ID = %" PRIu64 " for allocating output buffers", poolId);
+    std::shared_ptr<C2BlockPool> blockPool;
+    auto status = GetCodec2BlockPool(poolId, shared_from_this(), &blockPool);
+    if (status != C2_OK) {
+        ALOGE("Graphic block allocator is invalid: %d", status);
+        reportError(status);
+        *pool = nullptr;
+        return;
+    }
+
+    *pool = std::make_unique<VideoFramePool>(std::move(blockPool), size, pixelFormat, mIsSecure,
+                                             mDecoderTaskRunner);
+}
+
+// C2Component::stop(): synchronously tears down the decoder on its thread
+// (with kBlockingMethodTimeout), then joins the thread. Legal from RUNNING or
+// ERROR; moves the component back to STOPPED.
+c2_status_t V4L2DecodeComponent::stop() {
+    ALOGV("%s()", __func__);
+    // Serialize stop() against start().
+    std::lock_guard<std::mutex> lock(mStartStopLock);
+
+    auto currentState = mComponentState.load();
+    if (currentState != ComponentState::RUNNING && currentState != ComponentState::ERROR) {
+        ALOGE("Could not stop at %s state", ComponentStateToString(currentState));
+        return C2_BAD_STATE;
+    }
+
+    // Return immediately if the component is already stopped.
+    if (!mDecoderThread.IsRunning()) return C2_OK;
+
+    mStartStopDone.Reset();
+    mDecoderTaskRunner->PostTask(FROM_HERE,
+                                 ::base::BindOnce(&V4L2DecodeComponent::stopTask, mWeakThis));
+    if (!mStartStopDone.TimedWait(kBlockingMethodTimeout)) {
+        ALOGE("stopTask() timeout...");
+        return C2_TIMED_OUT;
+    }
+
+    mDecoderThread.Stop();
+    mDecoderTaskRunner = nullptr;
+    mComponentState.store(ComponentState::STOPPED);
+    return C2_OK;
+}
+
+// Decoder-thread half of stop(): abandons all in-flight works, destroys the
+// decoder, invalidates weak pointers (cancelling queued callbacks), and then
+// releases the caller blocked in stop().
+void V4L2DecodeComponent::stopTask() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    reportAbandonedWorks();
+    mIsDraining = false;
+    mDecoder = nullptr;
+    mWeakThisFactory.InvalidateWeakPtrs();
+
+    mStartStopDone.Signal();
+}
+
+// C2Component::setListener_vb(): installs the work-done listener. Setting a
+// non-null listener while RUNNING requires C2_MAY_BLOCK because the update is
+// marshalled onto the decoder thread and waited on.
+c2_status_t V4L2DecodeComponent::setListener_vb(
+        const std::shared_ptr<C2Component::Listener>& listener, c2_blocking_t mayBlock) {
+    ALOGV("%s()", __func__);
+
+    auto currentState = mComponentState.load();
+    if (currentState == ComponentState::RELEASED ||
+        (currentState == ComponentState::RUNNING && listener)) {
+        ALOGE("Could not set listener at %s state", ComponentStateToString(currentState));
+        return C2_BAD_STATE;
+    }
+    if (currentState == ComponentState::RUNNING && mayBlock != C2_MAY_BLOCK) {
+        ALOGE("Could not set listener at %s state non-blocking",
+              ComponentStateToString(currentState));
+        return C2_BLOCKING;
+    }
+
+    // If the decoder thread is not running it's safe to update the listener directly.
+    if (!mDecoderThread.IsRunning()) {
+        mListener = listener;
+        return C2_OK;
+    }
+
+    // Otherwise update on the decoder thread and block until it is applied, so
+    // |mListener| is only ever touched from that thread while it is alive.
+    ::base::WaitableEvent done;
+    mDecoderTaskRunner->PostTask(FROM_HERE, ::base::Bind(&V4L2DecodeComponent::setListenerTask,
+                                                         mWeakThis, listener, &done));
+    done.Wait();
+    return C2_OK;
+}
+
+// Decoder-thread half of setListener_vb(); |done| unblocks the caller.
+void V4L2DecodeComponent::setListenerTask(const std::shared_ptr<Listener>& listener,
+                                          ::base::WaitableEvent* done) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    mListener = listener;
+    done->Signal();
+}
+
+// C2Component::queue_nb(): non-blocking; forwards each C2Work to the decoder
+// thread (one queueTask per work), consuming |items| in order.
+c2_status_t V4L2DecodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
+    ALOGV("%s()", __func__);
+
+    auto currentState = mComponentState.load();
+    if (currentState != ComponentState::RUNNING) {
+        ALOGE("Could not queue at state: %s", ComponentStateToString(currentState));
+        return C2_BAD_STATE;
+    }
+
+    while (!items->empty()) {
+        mDecoderTaskRunner->PostTask(FROM_HERE,
+                                     ::base::BindOnce(&V4L2DecodeComponent::queueTask, mWeakThis,
+                                                      std::move(items->front())));
+        items->pop_front();
+    }
+    return C2_OK;
+}
+
+// Decoder-thread intake for a single C2Work: validates its shape, initializes
+// the worklet output, normalizes empty-input works (EOS/CSD), then enqueues it
+// in |mPendingWorks| and pumps the pipeline.
+void V4L2DecodeComponent::queueTask(std::unique_ptr<C2Work> work) {
+    ALOGV("%s(): flags=0x%x, index=%llu, timestamp=%llu", __func__, work->input.flags,
+          work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull());
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // Exactly one worklet and at most one input buffer are supported.
+    if (work->worklets.size() != 1u || work->input.buffers.size() > 1u) {
+        ALOGE("Invalid work: worklets.size()=%zu, input.buffers.size()=%zu", work->worklets.size(),
+              work->input.buffers.size());
+        work->result = C2_CORRUPTED;
+        reportWork(std::move(work));
+        return;
+    }
+
+    work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.ordinal = work->input.ordinal;
+    if (work->input.buffers.empty()) {
+        // Client may queue a work with no input buffer for either it's EOS or empty CSD, otherwise
+        // every work must have one input buffer.
+        if ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) == 0 &&
+            (work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) == 0) {
+            ALOGE("Invalid work: work with no input buffer should be EOS or CSD.");
+            reportError(C2_BAD_VALUE);
+            return;
+        }
+
+        // Emplace a nullptr to unify the check for work done.
+        ALOGV("Got a work with no input buffer! Emplace a nullptr inside.");
+        work->input.buffers.emplace_back(nullptr);
+    }
+
+    mPendingWorks.push(std::move(work));
+    pumpPendingWorks();
+}
+
+// Drains |mPendingWorks| into the decoder: for each work, optionally parses
+// CSD color aspects (non-secure H264), submits the bitstream buffer, triggers
+// a drain on EOS, and tracks the work in |mWorksAtDecoder| keyed by bitstream
+// id. Pumping pauses while a drain is in flight (|mIsDraining|).
+void V4L2DecodeComponent::pumpPendingWorks() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    auto currentState = mComponentState.load();
+    if (currentState != ComponentState::RUNNING) {
+        ALOGW("Could not pump C2Work at state: %s", ComponentStateToString(currentState));
+        return;
+    }
+
+    while (!mPendingWorks.empty() && !mIsDraining) {
+        std::unique_ptr<C2Work> work(std::move(mPendingWorks.front()));
+        mPendingWorks.pop();
+
+        const int32_t bitstreamId = frameIndexToBitstreamId(work->input.ordinal.frameIndex);
+        const bool isCSDWork = work->input.flags & C2FrameData::FLAG_CODEC_CONFIG;
+        const bool isEmptyWork = work->input.buffers.front() == nullptr;
+        ALOGV("Process C2Work bitstreamId=%d isCSDWork=%d, isEmptyWork=%d", bitstreamId, isCSDWork,
+              isEmptyWork);
+
+        if (work->input.buffers.front() != nullptr) {
+            // If input.buffers is not empty, the buffer should have meaningful content inside.
+            C2ConstLinearBlock linearBlock =
+                    work->input.buffers.front()->data().linearBlocks().front();
+            ALOG_ASSERT(linearBlock.size() > 0u, "Input buffer of work(%d) is empty.", bitstreamId);
+
+            // Try to parse color aspects from bitstream for CSD work of non-secure H264 codec.
+            if (isCSDWork && !mIsSecure && (mIntfImpl->getVideoCodec() == VideoCodec::H264)) {
+                C2StreamColorAspectsInfo::input codedAspects = {0u};
+                if (parseCodedColorAspects(linearBlock, &codedAspects)) {
+                    std::vector<std::unique_ptr<C2SettingResult>> failures;
+                    c2_status_t status =
+                            mIntfImpl->config({&codedAspects}, C2_MAY_BLOCK, &failures);
+                    if (status != C2_OK) {
+                        ALOGE("Failed to config color aspects to interface: %d", status);
+                        reportError(status);
+                        return;
+                    }
+
+                    // Record current frame index, color aspects should be updated only for output
+                    // buffers whose frame indices are not less than this one.
+                    mPendingColorAspectsChange = true;
+                    mPendingColorAspectsChangeFrameIndex = work->input.ordinal.frameIndex.peeku();
+                }
+            }
+
+            auto buffer = C2BlockToBitstreamBuffer(linearBlock, bitstreamId);
+            if (!buffer) {
+                reportError(C2_CORRUPTED);
+                return;
+            }
+            // onDecodeDone() fires on the decoder task runner once the input
+            // buffer is consumed.
+            mDecoder->decode(std::move(buffer), ::base::BindOnce(&V4L2DecodeComponent::onDecodeDone,
+                                                                 mWeakThis, bitstreamId));
+        }
+
+        if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+            mDecoder->drain(::base::BindOnce(&V4L2DecodeComponent::onDrainDone, mWeakThis));
+            mIsDraining = true;
+        }
+
+        auto res = mWorksAtDecoder.insert(std::make_pair(bitstreamId, std::move(work)));
+        ALOGW_IF(!res.second, "We already inserted bitstreamId %d to decoder?", bitstreamId);
+
+        // Directly report the empty CSD work as finished.
+        if (isCSDWork && isEmptyWork) reportWorkIfFinished(bitstreamId);
+    }
+}
+
+// Decoder callback when an input buffer has been consumed. kAborted is
+// silently ignored (flush/stop in progress); kError escalates to the listener;
+// kOk releases the input buffer and, for CSD works that have no output frame,
+// queues the bitstream id for reporting.
+void V4L2DecodeComponent::onDecodeDone(int32_t bitstreamId, VideoDecoder::DecodeStatus status) {
+    ALOGV("%s(bitstreamId=%d, status=%s)", __func__, bitstreamId,
+          VideoDecoder::DecodeStatusToString(status));
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    switch (status) {
+    case VideoDecoder::DecodeStatus::kAborted:
+        return;
+
+    case VideoDecoder::DecodeStatus::kError:
+        reportError(C2_CORRUPTED);
+        return;
+
+    // NOTE(review): declarations inside this unbraced case are legal only
+    // because kOk is the last label; braces would make this more robust.
+    case VideoDecoder::DecodeStatus::kOk:
+        auto it = mWorksAtDecoder.find(bitstreamId);
+        ALOG_ASSERT(it != mWorksAtDecoder.end());
+        C2Work* work = it->second.get();
+
+        // Release the input buffer.
+        work->input.buffers.front().reset();
+
+        // CSD Work doesn't have output buffer, the corresponding onOutputFrameReady() won't be
+        // called. Push the bitstreamId here.
+        if (work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)
+            mOutputBitstreamIds.push(bitstreamId);
+
+        pumpReportWork();
+        return;
+    }
+}
+
+// Decoder callback when a decoded frame is ready. Attaches the frame's graphic
+// block (and, if applicable, updated color aspects) to the matching work, runs
+// VP8/VP9 no-show-frame detection, then queues the id for reporting.
+void V4L2DecodeComponent::onOutputFrameReady(std::unique_ptr<VideoFrame> frame) {
+    ALOGV("%s(bitstreamId=%d)", __func__, frame->getBitstreamId());
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    const int32_t bitstreamId = frame->getBitstreamId();
+    auto it = mWorksAtDecoder.find(bitstreamId);
+    if (it == mWorksAtDecoder.end()) {
+        ALOGE("Work with bitstreamId=%d not found, already abandoned?", bitstreamId);
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    C2Work* work = it->second.get();
+
+    C2ConstGraphicBlock constBlock = std::move(frame)->getGraphicBlock();
+    std::shared_ptr<C2Buffer> buffer = C2Buffer::CreateGraphicBuffer(std::move(constBlock));
+    // Re-query color aspects once output catches up with the CSD that changed
+    // them (recorded in pumpPendingWorks()).
+    if (mPendingColorAspectsChange &&
+        work->input.ordinal.frameIndex.peeku() >= mPendingColorAspectsChangeFrameIndex) {
+        mIntfImpl->queryColorAspects(&mCurrentColorAspects);
+        mPendingColorAspectsChange = false;
+    }
+    if (mCurrentColorAspects) {
+        buffer->setInfo(mCurrentColorAspects);
+    }
+    work->worklets.front()->output.buffers.emplace_back(std::move(buffer));
+
+    // Check no-show frame by timestamps for VP8/VP9 cases before reporting the current work.
+    if (mIntfImpl->getVideoCodec() == VideoCodec::VP8 ||
+        mIntfImpl->getVideoCodec() == VideoCodec::VP9) {
+        detectNoShowFrameWorksAndReportIfFinished(work->input.ordinal);
+    }
+
+    mOutputBitstreamIds.push(bitstreamId);
+    pumpReportWork();
+}
+
+// Marks pending works older than |currOrdinal| with no returned output as
+// dropped (no-show) frames, then reports them. Relies on the decoder emitting
+// frames in display order, so an older work without output will never get one.
+void V4L2DecodeComponent::detectNoShowFrameWorksAndReportIfFinished(
+        const C2WorkOrdinalStruct& currOrdinal) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    std::vector<int32_t> noShowFrameBitstreamIds;
+    for (auto& kv : mWorksAtDecoder) {
+        const int32_t bitstreamId = kv.first;
+        const C2Work* work = kv.second.get();
+
+        // A work in mWorksAtDecoder would be considered to have no-show frame if there is no
+        // corresponding output buffer returned while the one of the work with latter timestamp is
+        // already returned. (VD is outputted in display order.)
+        if (isNoShowFrameWork(*work, currOrdinal)) {
+            work->worklets.front()->output.flags = C2FrameData::FLAG_DROP_FRAME;
+
+            // We need to call reportWorkIfFinished() for all detected no-show frame works. However,
+            // we should do it after the detection loop since reportWorkIfFinished() may erase
+            // entries in |mWorksAtDecoder|.
+            noShowFrameBitstreamIds.push_back(bitstreamId);
+            ALOGV("Detected no-show frame work index=%llu timestamp=%llu",
+                  work->input.ordinal.frameIndex.peekull(),
+                  work->input.ordinal.timestamp.peekull());
+        }
+    }
+
+    // Try to report works with no-show frame.
+    for (const int32_t bitstreamId : noShowFrameBitstreamIds) reportWorkIfFinished(bitstreamId);
+}
+
+// Reports finished works in output order; stops at the first id that is not
+// yet reportable so ordering toward the listener is preserved.
+void V4L2DecodeComponent::pumpReportWork() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    while (!mOutputBitstreamIds.empty()) {
+        if (!reportWorkIfFinished(mOutputBitstreamIds.front())) break;
+        mOutputBitstreamIds.pop();
+    }
+}
+
+// Reports the work for |bitstreamId| to the listener if it satisfies
+// isWorkDone(). Returns false when the work is the EOS work (handled by
+// reportEOSWork()), not yet done, or the listener report fails.
+bool V4L2DecodeComponent::reportWorkIfFinished(int32_t bitstreamId) {
+    ALOGV("%s(bitstreamId = %d)", __func__, bitstreamId);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // EOS work will not be reported here. reportEOSWork() does it.
+    // NOTE(review): assumes that while draining, the single remaining work in
+    // |mWorksAtDecoder| is always the EOS work — verify this invariant.
+    if (mIsDraining && mWorksAtDecoder.size() == 1u) {
+        ALOGV("work(bitstreamId = %d) is EOS Work.", bitstreamId);
+        return false;
+    }
+
+    auto it = mWorksAtDecoder.find(bitstreamId);
+    ALOG_ASSERT(it != mWorksAtDecoder.end());
+
+    if (!isWorkDone(*(it->second))) {
+        ALOGV("work(bitstreamId = %d) is not done yet.", bitstreamId);
+        return false;
+    }
+
+    std::unique_ptr<C2Work> work = std::move(it->second);
+    mWorksAtDecoder.erase(it);
+
+    work->result = C2_OK;
+    work->workletsProcessed = static_cast<uint32_t>(work->worklets.size());
+    // A work with neither flags nor output buffer would be treated as no-corresponding
+    // output by C2 framework, and regain pipeline capacity immediately.
+    if (work->worklets.front()->output.flags & C2FrameData::FLAG_DROP_FRAME)
+        work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
+
+    return reportWork(std::move(work));
+}
+
+// Reports the EOS work to the listener once draining has completed. Expects
+// |mWorksAtDecoder| to contain exactly the EOS work; returns false (and logs
+// the leftovers) otherwise.
+bool V4L2DecodeComponent::reportEOSWork() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    // In this moment all works prior to EOS work should be done and returned to listener.
+    if (mWorksAtDecoder.size() != 1u) {
+        ALOGE("It shouldn't have remaining works in mWorksAtDecoder except EOS work.");
+        for (const auto& kv : mWorksAtDecoder) {
+            ALOGE("bitstreamId(%d) => Work index=%llu, timestamp=%llu", kv.first,
+                  kv.second->input.ordinal.frameIndex.peekull(),
+                  kv.second->input.ordinal.timestamp.peekull());
+        }
+        return false;
+    }
+
+    std::unique_ptr<C2Work> eosWork(std::move(mWorksAtDecoder.begin()->second));
+    mWorksAtDecoder.clear();
+
+    eosWork->result = C2_OK;
+    eosWork->workletsProcessed = static_cast<uint32_t>(eosWork->worklets.size());
+    eosWork->worklets.front()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
+    // Release the input buffer back to the framework when reporting the work.
+    if (!eosWork->input.buffers.empty()) eosWork->input.buffers.front().reset();
+
+    return reportWork(std::move(eosWork));
+}
+
+// Delivers a single finished work to the C2 listener. Returns false when no
+// listener has been set via setListener_vb().
+bool V4L2DecodeComponent::reportWork(std::unique_ptr<C2Work> work) {
+    ALOGV("%s(work=%llu)", __func__, work->input.ordinal.frameIndex.peekull());
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mListener == nullptr) {
+        ALOGE("mListener is nullptr, setListener_vb() not called?");
+        return false;
+    }
+
+    std::list<std::unique_ptr<C2Work>> doneWorks;
+    doneWorks.push_back(std::move(work));
+    mListener->onWorkDone_nb(shared_from_this(), std::move(doneWorks));
+    return true;
+}
+
+// C2Component::flush_sm(). Only FLUSH_COMPONENT is supported; the actual flush
+// runs asynchronously on the decoder task runner. Flushed works are returned
+// through the listener (see reportAbandonedWorks()), not via |flushedWork|.
+c2_status_t V4L2DecodeComponent::flush_sm(
+        flush_mode_t mode, std::list<std::unique_ptr<C2Work>>* const /* flushedWork */) {
+    ALOGV("%s()", __func__);
+
+    auto currentState = mComponentState.load();
+    if (currentState != ComponentState::RUNNING) {
+        ALOGE("Could not flush at state: %s", ComponentStateToString(currentState));
+        return C2_BAD_STATE;
+    }
+    if (mode != FLUSH_COMPONENT) {
+        return C2_OMITTED;  // Tunneling is not supported by now
+    }
+
+    mDecoderTaskRunner->PostTask(FROM_HERE,
+                                 ::base::BindOnce(&V4L2DecodeComponent::flushTask, mWeakThis));
+    return C2_OK;
+}
+
+// Flushes the decoder and abandons all pending/in-flight works. Runs on the
+// decoder task runner.
+void V4L2DecodeComponent::flushTask() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    mDecoder->flush();
+    reportAbandonedWorks();
+
+    // Pending EOS work will be abandoned here due to component flush if any.
+    mIsDraining = false;
+}
+
+// Collects every queued work (|mPendingWorks|) and every work at the decoder
+// (|mWorksAtDecoder|), marks them C2_NOT_FOUND, and returns them to the
+// listener in one batch. Used when flushing the component.
+void V4L2DecodeComponent::reportAbandonedWorks() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    std::list<std::unique_ptr<C2Work>> abandonedWorks;
+    while (!mPendingWorks.empty()) {
+        abandonedWorks.emplace_back(std::move(mPendingWorks.front()));
+        mPendingWorks.pop();
+    }
+    for (auto& kv : mWorksAtDecoder) {
+        abandonedWorks.emplace_back(std::move(kv.second));
+    }
+    mWorksAtDecoder.clear();
+
+    for (auto& work : abandonedWorks) {
+        // TODO: correlate the definition of flushed work result to framework.
+        work->result = C2_NOT_FOUND;
+        // When the work is abandoned, buffer in input.buffers shall reset by component.
+        if (!work->input.buffers.empty()) {
+            work->input.buffers.front().reset();
+        }
+    }
+    if (!abandonedWorks.empty()) {
+        if (!mListener) {
+            ALOGE("mListener is nullptr, setListener_vb() not called?");
+            return;
+        }
+        mListener->onWorkDone_nb(shared_from_this(), std::move(abandonedWorks));
+    }
+}
+
+// C2Component::drain_nb(). Only DRAIN_COMPONENT_WITH_EOS triggers real work;
+// the drain itself runs asynchronously on the decoder task runner.
+c2_status_t V4L2DecodeComponent::drain_nb(drain_mode_t mode) {
+    ALOGV("%s(mode=%u)", __func__, mode);
+
+    auto currentState = mComponentState.load();
+    if (currentState != ComponentState::RUNNING) {
+        ALOGE("Could not drain at state: %s", ComponentStateToString(currentState));
+        return C2_BAD_STATE;
+    }
+
+    switch (mode) {
+    case DRAIN_CHAIN:
+        return C2_OMITTED;  // Tunneling is not supported.
+
+    case DRAIN_COMPONENT_NO_EOS:
+        return C2_OK;  // Do nothing special.
+
+    case DRAIN_COMPONENT_WITH_EOS:
+        mDecoderTaskRunner->PostTask(FROM_HERE,
+                                     ::base::BindOnce(&V4L2DecodeComponent::drainTask, mWeakThis));
+        return C2_OK;
+    }
+    // |mode| comes from the caller; guard against values outside the enum so
+    // control never falls off the end of this non-void function (undefined
+    // behavior).
+    return C2_BAD_VALUE;
+}
+
+// Performs the drain request on the decoder task runner. If works are still
+// queued, piggybacks the EOS flag on the last one; otherwise asks the decoder
+// to drain whatever it is holding.
+void V4L2DecodeComponent::drainTask() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    if (!mPendingWorks.empty()) {
+        ALOGV("Set EOS flag at last queued work.");
+        auto& flags = mPendingWorks.back()->input.flags;
+        flags = static_cast<C2FrameData::flags_t>(flags | C2FrameData::FLAG_END_OF_STREAM);
+        return;
+    }
+
+    if (!mWorksAtDecoder.empty()) {
+        ALOGV("Drain the pending works at the decoder.");
+        mDecoder->drain(::base::BindOnce(&V4L2DecodeComponent::onDrainDone, mWeakThis));
+        mIsDraining = true;
+    }
+    // NOTE(review): when both |mPendingWorks| and |mWorksAtDecoder| are empty
+    // the drain request is silently dropped and onDrainDone() is never invoked
+    // — confirm the framework never drains an idle component, or handle it.
+}
+
+// Called by V4L2Decoder when the drain finishes. On success reports the EOS
+// work and resumes pumping any works queued while draining.
+void V4L2DecodeComponent::onDrainDone(VideoDecoder::DecodeStatus status) {
+    ALOGV("%s(status=%s)", __func__, VideoDecoder::DecodeStatusToString(status));
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    switch (status) {
+    case VideoDecoder::DecodeStatus::kAborted:
+        // Drain was cancelled (e.g. by a flush); nothing to report.
+        return;
+
+    case VideoDecoder::DecodeStatus::kError:
+        reportError(C2_CORRUPTED);
+        return;
+
+    case VideoDecoder::DecodeStatus::kOk:
+        mIsDraining = false;
+        if (!reportEOSWork()) {
+            reportError(C2_CORRUPTED);
+            return;
+        }
+
+        // Resume processing works that were queued during the drain.
+        mDecoderTaskRunner->PostTask(
+                FROM_HERE, ::base::BindOnce(&V4L2DecodeComponent::pumpPendingWorks, mWeakThis));
+        return;
+    }
+}
+
+// Moves the component into the ERROR state and notifies the listener. Only the
+// first error is reported; subsequent calls are no-ops.
+void V4L2DecodeComponent::reportError(c2_status_t error) {
+    ALOGE("%s(error=%u)", __func__, static_cast<uint32_t>(error));
+    ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mComponentState.load() == ComponentState::ERROR) return;
+    mComponentState.store(ComponentState::ERROR);
+
+    if (!mListener) {
+        ALOGE("mListener is nullptr, setListener_vb() not called?");
+        return;
+    }
+    mListener->onError_nb(shared_from_this(), static_cast<uint32_t>(error));
+}
+
+// C2Component::reset(). Implemented as stop(): tears the component down to the
+// stopped state.
+c2_status_t V4L2DecodeComponent::reset() {
+    ALOGV("%s()", __func__);
+
+    return stop();
+}
+
+// C2Component::release(). Resets the component, then marks it RELEASED
+// regardless of the reset outcome, and propagates the reset status.
+c2_status_t V4L2DecodeComponent::release() {
+    ALOGV("%s()", __func__);
+
+    const c2_status_t status = reset();
+    mComponentState.store(ComponentState::RELEASED);
+    return status;
+}
+
+// C2Component::announce_nb(). Work announcement is a tunneling feature, which
+// this component does not support.
+c2_status_t V4L2DecodeComponent::announce_nb(const std::vector<C2WorkOutline>& /* items */) {
+    return C2_OMITTED;  // Tunneling is not supported by now
+}
+
+// Returns the component's C2ComponentInterface (stored in |mIntf|).
+std::shared_ptr<C2ComponentInterface> V4L2DecodeComponent::intf() {
+    return mIntf;
+}
+
+// static
+const char* V4L2DecodeComponent::ComponentStateToString(ComponentState state) {
+    switch (state) {
+    case ComponentState::STOPPED:
+        return "STOPPED";
+    case ComponentState::RUNNING:
+        return "RUNNING";
+    case ComponentState::RELEASED:
+        return "RELEASED";
+    case ComponentState::ERROR:
+        return "ERROR";
+    }
+}
+
+}  // namespace android
diff --git a/components/V4L2DecodeInterface.cpp b/components/V4L2DecodeInterface.cpp
new file mode 100644
index 0000000..c83d297
--- /dev/null
+++ b/components/V4L2DecodeInterface.cpp
@@ -0,0 +1,347 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "V4L2DecodeInterface"
+
+#include <v4l2_codec2/components/V4L2DecodeInterface.h>
+
+#include <C2PlatformSupport.h>
+#include <SimpleC2Interface.h>
+#include <android/hardware/graphics/common/1.0/types.h>
+#include <log/log.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+
+#include <v4l2_codec2/common/V4L2ComponentCommon.h>
+#include <v4l2_device.h>
+
+namespace android {
+namespace {
+
+// Pixel areas used to pick an input bitstream buffer size. 1088 rather than
+// 1080 to account for 16-pixel-aligned coded height.
+constexpr size_t k1080pArea = 1920 * 1088;
+constexpr size_t k4KArea = 3840 * 2160;
+// Input bitstream buffer size for up to 1080p streams.
+constexpr size_t kInputBufferSizeFor1080p = 1024 * 1024;  // 1MB
+// Input bitstream buffer size for up to 4k streams.
+constexpr size_t kInputBufferSizeFor4K = 4 * kInputBufferSizeFor1080p;
+
+// Supported V4L2 input formats. Currently we only support stateful API.
+constexpr uint32_t kSupportedInputFourccs[] = {
+        V4L2_PIX_FMT_H264,
+        V4L2_PIX_FMT_VP8,
+        V4L2_PIX_FMT_VP9,
+};
+
+// Maps a component name (secure or non-secure variant) to its VideoCodec.
+// Returns std::nullopt and logs an error for unrecognized names.
+std::optional<VideoCodec> getCodecFromComponentName(const std::string& name) {
+    if (name == V4L2ComponentName::kH264Decoder || name == V4L2ComponentName::kH264SecureDecoder)
+        return VideoCodec::H264;
+    if (name == V4L2ComponentName::kVP8Decoder || name == V4L2ComponentName::kVP8SecureDecoder)
+        return VideoCodec::VP8;
+    if (name == V4L2ComponentName::kVP9Decoder || name == V4L2ComponentName::kVP9SecureDecoder)
+        return VideoCodec::VP9;
+
+    ALOGE("Unknown name: %s", name.c_str());
+    return std::nullopt;
+}
+
+// Picks the input bitstream buffer size for a video of |area| pixels:
+// 1MB up to 1080p, 4MB above that (with a warning beyond 4K).
+size_t calculateInputBufferSize(size_t area) {
+    if (area > k4KArea) {
+        ALOGW("Input buffer size for video size (%zu) larger than 4K (%zu) might be too small.",
+              area, k4KArea);
+    }
+
+    // Enlarge the input buffer for 4k video
+    if (area > k1080pArea) return kInputBufferSizeFor4K;
+    return kInputBufferSizeFor1080p;
+}
+
+}  // namespace
+
+// static
+// Validates the requested profile and level against the fields declared for
+// this parameter (clamping to the nearest possible value when needed).
+C2R V4L2DecodeInterface::ProfileLevelSetter(bool /* mayBlock */,
+                                            C2P<C2StreamProfileLevelInfo::input>& info) {
+    return info.F(info.v.profile)
+            .validatePossible(info.v.profile)
+            .plus(info.F(info.v.level).validatePossible(info.v.level));
+}
+
+// static
+// Validates the requested output picture width and height against the
+// declared field ranges.
+C2R V4L2DecodeInterface::SizeSetter(bool /* mayBlock */,
+                                    C2P<C2StreamPictureSizeInfo::output>& videoSize) {
+    return videoSize.F(videoSize.v.width)
+            .validatePossible(videoSize.v.width)
+            .plus(videoSize.F(videoSize.v.height).validatePossible(videoSize.v.height));
+}
+
+// static
+// static
+// Clamps each color-aspects field to its *_OTHER upper bound. Shared by both
+// the default (tuning) and coded (info) color-aspects parameters.
+template <typename T>
+C2R V4L2DecodeInterface::DefaultColorAspectsSetter(bool /* mayBlock */, C2P<T>& def) {
+    if (def.v.range > C2Color::RANGE_OTHER) {
+        def.set().range = C2Color::RANGE_OTHER;
+    }
+    if (def.v.primaries > C2Color::PRIMARIES_OTHER) {
+        def.set().primaries = C2Color::PRIMARIES_OTHER;
+    }
+    if (def.v.transfer > C2Color::TRANSFER_OTHER) {
+        def.set().transfer = C2Color::TRANSFER_OTHER;
+    }
+    if (def.v.matrix > C2Color::MATRIX_OTHER) {
+        def.set().matrix = C2Color::MATRIX_OTHER;
+    }
+    return C2R::Ok();
+}
+
+// static
+// Merges the bitstream-coded color aspects with the framework defaults:
+// each field takes the coded value when specified, falling back to |def|.
+C2R V4L2DecodeInterface::MergedColorAspectsSetter(
+        bool /* mayBlock */, C2P<C2StreamColorAspectsInfo::output>& merged,
+        const C2P<C2StreamColorAspectsTuning::output>& def,
+        const C2P<C2StreamColorAspectsInfo::input>& coded) {
+    // Take coded values for all specified fields, and default values for unspecified ones.
+    merged.set().range = coded.v.range == RANGE_UNSPECIFIED ? def.v.range : coded.v.range;
+    merged.set().primaries =
+            coded.v.primaries == PRIMARIES_UNSPECIFIED ? def.v.primaries : coded.v.primaries;
+    merged.set().transfer =
+            coded.v.transfer == TRANSFER_UNSPECIFIED ? def.v.transfer : coded.v.transfer;
+    merged.set().matrix = coded.v.matrix == MATRIX_UNSPECIFIED ? def.v.matrix : coded.v.matrix;
+    return C2R::Ok();
+}
+
+// static
+// Recomputes the maximum input buffer size whenever the output picture size
+// changes, based on the picture area (see calculateInputBufferSize()).
+C2R V4L2DecodeInterface::MaxInputBufferSizeCalculator(
+        bool /* mayBlock */, C2P<C2StreamMaxBufferSizeInfo::input>& me,
+        const C2P<C2StreamPictureSizeInfo::output>& size) {
+    me.set().value = calculateInputBufferSize(size.v.width * size.v.height);
+    return C2R::Ok();
+}
+
+// Builds the C2 parameter set for the decoder component named |name|.
+// On failure (unknown name, or no supported profiles reported by the V4L2
+// device) |mInitStatus| is set to C2_BAD_VALUE and the interface is unusable.
+V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
+                                         const std::shared_ptr<C2ReflectorHelper>& helper)
+      : C2InterfaceHelper(helper), mInitStatus(C2_OK) {
+    ALOGV("%s(%s)", __func__, name.c_str());
+
+    setDerivedInstance(this);
+
+    mVideoCodec = getCodecFromComponentName(name);
+    if (!mVideoCodec) {
+        ALOGE("Invalid component name: %s", name.c_str());
+        mInitStatus = C2_BAD_VALUE;
+        return;
+    }
+
+    // Declare the codec-specific input MIME type and profile/level parameter.
+    std::string inputMime;
+    switch (*mVideoCodec) {
+    case VideoCodec::H264:
+        inputMime = MEDIA_MIMETYPE_VIDEO_AVC;
+        addParameter(
+                DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                        .withDefault(new C2StreamProfileLevelInfo::input(
+                                0u, C2Config::PROFILE_AVC_MAIN, C2Config::LEVEL_AVC_4))
+                        .withFields(
+                                {C2F(mProfileLevel, profile)
+                                         .oneOf({C2Config::PROFILE_AVC_BASELINE,
+                                                 C2Config::PROFILE_AVC_CONSTRAINED_BASELINE,
+                                                 C2Config::PROFILE_AVC_MAIN,
+                                                 C2Config::PROFILE_AVC_HIGH,
+                                                 C2Config::PROFILE_AVC_CONSTRAINED_HIGH}),
+                                 C2F(mProfileLevel, level)
+                                         .oneOf({C2Config::LEVEL_AVC_1, C2Config::LEVEL_AVC_1B,
+                                                 C2Config::LEVEL_AVC_1_1, C2Config::LEVEL_AVC_1_2,
+                                                 C2Config::LEVEL_AVC_1_3, C2Config::LEVEL_AVC_2,
+                                                 C2Config::LEVEL_AVC_2_1, C2Config::LEVEL_AVC_2_2,
+                                                 C2Config::LEVEL_AVC_3, C2Config::LEVEL_AVC_3_1,
+                                                 C2Config::LEVEL_AVC_3_2, C2Config::LEVEL_AVC_4,
+                                                 C2Config::LEVEL_AVC_4_1, C2Config::LEVEL_AVC_4_2,
+                                                 C2Config::LEVEL_AVC_5, C2Config::LEVEL_AVC_5_1,
+                                                 C2Config::LEVEL_AVC_5_2})})
+                        .withSetter(ProfileLevelSetter)
+                        .build());
+        break;
+
+    case VideoCodec::VP8:
+        inputMime = MEDIA_MIMETYPE_VIDEO_VP8;
+        // VP8 has no profile/level signalling; expose a constant UNUSED value.
+        addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                             .withConstValue(new C2StreamProfileLevelInfo::input(
+                                     0u, C2Config::PROFILE_UNUSED, C2Config::LEVEL_UNUSED))
+                             .build());
+        break;
+
+    case VideoCodec::VP9:
+        inputMime = MEDIA_MIMETYPE_VIDEO_VP9;
+        addParameter(
+                DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                        .withDefault(new C2StreamProfileLevelInfo::input(
+                                0u, C2Config::PROFILE_VP9_0, C2Config::LEVEL_VP9_5))
+                        .withFields({C2F(mProfileLevel, profile).oneOf({C2Config::PROFILE_VP9_0}),
+                                     C2F(mProfileLevel, level)
+                                             .oneOf({C2Config::LEVEL_VP9_1, C2Config::LEVEL_VP9_1_1,
+                                                     C2Config::LEVEL_VP9_2, C2Config::LEVEL_VP9_2_1,
+                                                     C2Config::LEVEL_VP9_3, C2Config::LEVEL_VP9_3_1,
+                                                     C2Config::LEVEL_VP9_4, C2Config::LEVEL_VP9_4_1,
+                                                     C2Config::LEVEL_VP9_5})})
+                        .withSetter(ProfileLevelSetter)
+                        .build());
+        break;
+    }
+
+    // Probe the V4L2 device for the resolutions it supports.
+    auto device = media::V4L2Device::Create();
+    const auto supportedProfiles = device->GetSupportedDecodeProfiles(
+            base::size(kSupportedInputFourccs), kSupportedInputFourccs);
+    if (supportedProfiles.empty()) {
+        ALOGE("Failed to get supported profiles from V4L2 device.");
+        mInitStatus = C2_BAD_VALUE;
+        return;
+    }
+
+    // NOTE(review): this takes the resolution limits of the first reported
+    // profile, which may belong to a different codec than |mVideoCodec| —
+    // confirm whether the entry matching the selected codec should be used.
+    mMinSize = supportedProfiles[0].min_resolution;
+    mMaxSize = supportedProfiles[0].max_resolution;
+
+    // Stream/port formats: linear input bitstream, graphic output frames.
+    addParameter(
+            DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                    .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
+                    .build());
+    addParameter(
+            DefineParam(mInputMemoryUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
+                    .withConstValue(new C2StreamUsageTuning::input(
+                            0u, static_cast<uint64_t>(android::hardware::graphics::common::V1_0::
+                                                              BufferUsage::VIDEO_DECODER)))
+                    .build());
+
+    addParameter(DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                         .withConstValue(
+                                 new C2StreamBufferTypeSetting::output(0u, C2BufferData::GRAPHIC))
+                         .build());
+
+    addParameter(DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                         .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
+                                 inputMime.c_str()))
+                         .build());
+
+    addParameter(DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                         .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
+                                 MEDIA_MIMETYPE_VIDEO_RAW))
+                         .build());
+
+    // Picture size, constrained to the device-reported range in 16px steps.
+    addParameter(
+            DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+                    .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
+                    .withFields({
+                            C2F(mSize, width).inRange(mMinSize.width(), mMaxSize.width(), 16),
+                            C2F(mSize, height).inRange(mMinSize.height(), mMaxSize.height(), 16),
+                    })
+                    .withSetter(SizeSetter)
+                    .build());
+
+    // Max input buffer size is derived from the picture size (see calculator).
+    addParameter(
+            DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
+                    .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kInputBufferSizeFor1080p))
+                    .withFields({
+                            C2F(mMaxInputSize, value).any(),
+                    })
+                    .calculatedAs(MaxInputBufferSizeCalculator, mSize)
+                    .build());
+
+    // TODO(b/153608694): Support secure mode.
+    const C2Allocator::id_t inputAllocators[] = {C2PlatformAllocatorStore::BLOB};
+    const C2Allocator::id_t outputAllocators[] = {C2AllocatorStore::DEFAULT_GRAPHIC};
+    const C2Allocator::id_t surfaceAllocator = C2PlatformAllocatorStore::BUFFERQUEUE;
+    const C2BlockPool::local_id_t outputBlockPools[] = {C2BlockPool::BASIC_GRAPHIC};
+
+    addParameter(
+            DefineParam(mInputAllocatorIds, C2_PARAMKEY_INPUT_ALLOCATORS)
+                    .withConstValue(C2PortAllocatorsTuning::input::AllocShared(inputAllocators))
+                    .build());
+
+    addParameter(
+            DefineParam(mOutputAllocatorIds, C2_PARAMKEY_OUTPUT_ALLOCATORS)
+                    .withConstValue(C2PortAllocatorsTuning::output::AllocShared(outputAllocators))
+                    .build());
+
+    addParameter(DefineParam(mOutputSurfaceAllocatorId, C2_PARAMKEY_OUTPUT_SURFACE_ALLOCATOR)
+                         .withConstValue(new C2PortSurfaceAllocatorTuning::output(surfaceAllocator))
+                         .build());
+
+    addParameter(
+            DefineParam(mOutputBlockPoolIds, C2_PARAMKEY_OUTPUT_BLOCK_POOLS)
+                    .withDefault(C2PortBlockPoolsTuning::output::AllocShared(outputBlockPools))
+                    .withFields({C2F(mOutputBlockPoolIds, m.values[0]).any(),
+                                 C2F(mOutputBlockPoolIds, m.values).inRange(0, 1)})
+                    .withSetter(Setter<C2PortBlockPoolsTuning::output>::NonStrictValuesWithNoDeps)
+                    .build());
+
+    // Color aspects: defaults (tuning), coded (from the bitstream), and the
+    // merged output value computed by MergedColorAspectsSetter.
+    addParameter(
+            DefineParam(mDefaultColorAspects, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS)
+                    .withDefault(new C2StreamColorAspectsTuning::output(
+                            0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+                            C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+                    .withFields(
+                            {C2F(mDefaultColorAspects, range)
+                                     .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
+                             C2F(mDefaultColorAspects, primaries)
+                                     .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+                                              C2Color::PRIMARIES_OTHER),
+                             C2F(mDefaultColorAspects, transfer)
+                                     .inRange(C2Color::TRANSFER_UNSPECIFIED,
+                                              C2Color::TRANSFER_OTHER),
+                             C2F(mDefaultColorAspects, matrix)
+                                     .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
+                    .withSetter(DefaultColorAspectsSetter)
+                    .build());
+
+    addParameter(
+            DefineParam(mCodedColorAspects, C2_PARAMKEY_VUI_COLOR_ASPECTS)
+                    .withDefault(new C2StreamColorAspectsInfo::input(
+                            0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED,
+                            C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+                    .withFields(
+                            {C2F(mCodedColorAspects, range)
+                                     .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
+                             C2F(mCodedColorAspects, primaries)
+                                     .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+                                              C2Color::PRIMARIES_OTHER),
+                             C2F(mCodedColorAspects, transfer)
+                                     .inRange(C2Color::TRANSFER_UNSPECIFIED,
+                                              C2Color::TRANSFER_OTHER),
+                             C2F(mCodedColorAspects, matrix)
+                                     .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
+                    .withSetter(DefaultColorAspectsSetter)
+                    .build());
+
+    addParameter(
+            DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
+                    .withDefault(new C2StreamColorAspectsInfo::output(
+                            0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+                            C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+                    .withFields(
+                            {C2F(mColorAspects, range)
+                                     .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
+                             C2F(mColorAspects, primaries)
+                                     .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+                                              C2Color::PRIMARIES_OTHER),
+                             C2F(mColorAspects, transfer)
+                                     .inRange(C2Color::TRANSFER_UNSPECIFIED,
+                                              C2Color::TRANSFER_OTHER),
+                             C2F(mColorAspects, matrix)
+                                     .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
+                    .withSetter(MergedColorAspectsSetter, mDefaultColorAspects, mCodedColorAspects)
+                    .build());
+}
+
+// Returns the input bitstream buffer size for the maximum supported
+// resolution (see calculateInputBufferSize()).
+size_t V4L2DecodeInterface::getInputBufferSize() const {
+    return calculateInputBufferSize(getMaxSize().GetArea());
+}
+
+// Queries the current merged output color aspects without blocking. On
+// success the queried values are stored into |targetColorAspects|; otherwise
+// the query status is returned and the output is left untouched.
+c2_status_t V4L2DecodeInterface::queryColorAspects(
+        std::shared_ptr<C2StreamColorAspectsInfo::output>* targetColorAspects) {
+    auto queried = std::make_unique<C2StreamColorAspectsInfo::output>(
+            0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+            C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED);
+    const c2_status_t status = query({queried.get()}, {}, C2_DONT_BLOCK, nullptr);
+    if (status != C2_OK) {
+        return status;
+    }
+    *targetColorAspects = std::move(queried);
+    return status;
+}
+
+}  // namespace android
diff --git a/components/V4L2Decoder.cpp b/components/V4L2Decoder.cpp
new file mode 100644
index 0000000..59e3b0c
--- /dev/null
+++ b/components/V4L2Decoder.cpp
@@ -0,0 +1,665 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "V4L2Decoder"
+
+#include <v4l2_codec2/components/V4L2Decoder.h>
+
+#include <stdint.h>
+
+#include <base/bind.h>
+#include <base/memory/ptr_util.h>
+#include <log/log.h>
+
+namespace android {
+namespace {
+
+// Number of buffers allocated on the V4L2 input (OUTPUT) queue.
+constexpr size_t kNumInputBuffers = 16;
+// Extra buffers for transmitting in the whole video pipeline.
+constexpr size_t kNumExtraOutputBuffers = 4;
+
+// Translates a VideoCodec to its V4L2 compressed pixel format fourcc.
+uint32_t VideoCodecToV4L2PixFmt(VideoCodec codec) {
+    switch (codec) {
+    case VideoCodec::H264:
+        return V4L2_PIX_FMT_H264;
+    case VideoCodec::VP8:
+        return V4L2_PIX_FMT_VP8;
+    case VideoCodec::VP9:
+        return V4L2_PIX_FMT_VP9;
+    }
+    // All enumerators are handled above; return 0 (an invalid fourcc) so
+    // control never falls off the end of a non-void function (undefined
+    // behavior) if an out-of-range value is ever passed.
+    return 0;
+}
+
+}  // namespace
+
+// static
+// Creates a V4L2Decoder and starts it on |taskRunner|. Returns nullptr when
+// initialization fails.
+std::unique_ptr<VideoDecoder> V4L2Decoder::Create(
+        const VideoCodec& codec, const size_t inputBufferSize, GetPoolCB getPoolCb,
+        OutputCB outputCb, ErrorCB errorCb, scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
+    auto decoder = ::base::WrapUnique<V4L2Decoder>(new V4L2Decoder(taskRunner));
+    const bool started = decoder->start(codec, inputBufferSize, std::move(getPoolCb),
+                                        std::move(outputCb), std::move(errorCb));
+    if (!started) {
+        return nullptr;
+    }
+    return decoder;
+}
+
+// Constructor; real initialization happens in start() (see Create()).
+V4L2Decoder::V4L2Decoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner)
+      : mTaskRunner(std::move(taskRunner)) {
+    ALOGV("%s()", __func__);
+
+    mWeakThis = mWeakThisFactory.GetWeakPtr();
+}
+
+// Destructor: invalidates weak pointers (cancelling pending callbacks), then
+// tears down the V4L2 queues and the device.
+V4L2Decoder::~V4L2Decoder() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    mWeakThisFactory.InvalidateWeakPtrs();
+
+    // Streamoff input and output queue.
+    if (mOutputQueue) {
+        mOutputQueue->Streamoff();
+        mOutputQueue->DeallocateBuffers();
+        mOutputQueue = nullptr;
+    }
+    if (mInputQueue) {
+        mInputQueue->Streamoff();
+        mInputQueue->DeallocateBuffers();
+        mInputQueue = nullptr;
+    }
+    if (mDevice) {
+        mDevice->StopPolling();
+        mDevice = nullptr;
+    }
+}
+
+// Opens and validates the V4L2 device for |codec|, sets up the input queue,
+// and starts device polling. Returns false on any failure; on success the
+// decoder transitions to the Idle state.
+bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize, GetPoolCB getPoolCb,
+                        OutputCB outputCb, ErrorCB errorCb) {
+    ALOGV("%s(codec=%s, inputBufferSize=%zu)", __func__, VideoCodecToString(codec),
+          inputBufferSize);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    mGetPoolCb = std::move(getPoolCb);
+    mOutputCb = std::move(outputCb);
+    mErrorCb = std::move(errorCb);
+
+    if (mState == State::Error) {
+        ALOGE("Ignore due to error state.");
+        return false;
+    }
+
+    mDevice = media::V4L2Device::Create();
+
+    const uint32_t inputPixelFormat = VideoCodecToV4L2PixFmt(codec);
+    if (!mDevice->Open(media::V4L2Device::Type::kDecoder, inputPixelFormat)) {
+        ALOGE("Failed to open device for %s", VideoCodecToString(codec));
+        return false;
+    }
+
+    if (!mDevice->HasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
+        ALOGE("Device does not have VIDEO_M2M_MPLANE and STREAMING capabilities.");
+        return false;
+    }
+
+    // Probe for flush support; drain() relies on V4L2_DEC_CMD_STOP.
+    struct v4l2_decoder_cmd cmd;
+    memset(&cmd, 0, sizeof(cmd));
+    cmd.cmd = V4L2_DEC_CMD_STOP;
+    if (mDevice->Ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
+        ALOGE("Device does not support flushing (V4L2_DEC_CMD_STOP)");
+        return false;
+    }
+
+    // Subscribe to the resolution change event.
+    struct v4l2_event_subscription sub;
+    memset(&sub, 0, sizeof(sub));
+    sub.type = V4L2_EVENT_SOURCE_CHANGE;
+    if (mDevice->Ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
+        ALOGE("ioctl() failed: VIDIOC_SUBSCRIBE_EVENT: V4L2_EVENT_SOURCE_CHANGE");
+        return false;
+    }
+
+    // Create Input/Output V4L2Queue, and setup input queue.
+    mInputQueue = mDevice->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+    mOutputQueue = mDevice->GetQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+    if (!mInputQueue || !mOutputQueue) {
+        ALOGE("Failed to create V4L2 queue.");
+        return false;
+    }
+    if (!setupInputFormat(inputPixelFormat, inputBufferSize)) {
+        ALOGE("Failed to setup input format.");
+        return false;
+    }
+
+    if (!mDevice->StartPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
+                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
+        ALOGE("Failed to start polling V4L2 device.");
+        return false;
+    }
+
+    setState(State::Idle);
+    return true;
+}
+
+// Verifies the device supports |inputPixelFormat|, sets it on the input
+// (OUTPUT) queue with |inputBufferSize|-byte buffers, allocates DMABUF
+// buffers, and starts streaming. Returns false on any failure.
+bool V4L2Decoder::setupInputFormat(const uint32_t inputPixelFormat, const size_t inputBufferSize) {
+    ALOGV("%s(inputPixelFormat=%u, inputBufferSize=%zu)", __func__, inputPixelFormat,
+          inputBufferSize);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    // Check if the format is supported.
+    std::vector<uint32_t> formats =
+            mDevice->EnumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+    if (std::find(formats.begin(), formats.end(), inputPixelFormat) == formats.end()) {
+        // Fixed garbled log message ("Input codec s not supported...").
+        ALOGE("Input codec is not supported by device.");
+        return false;
+    }
+
+    // Setup the input format.
+    auto format = mInputQueue->SetFormat(inputPixelFormat, media::Size(), inputBufferSize);
+    if (!format) {
+        ALOGE("Failed to call IOCTL to set input format.");
+        return false;
+    }
+    ALOG_ASSERT(format->fmt.pix_mp.pixelformat == inputPixelFormat);
+
+    if (mInputQueue->AllocateBuffers(kNumInputBuffers, V4L2_MEMORY_DMABUF) == 0) {
+        ALOGE("Failed to allocate input buffer.");
+        return false;
+    }
+    if (!mInputQueue->Streamon()) {
+        ALOGE("Failed to streamon input queue.");
+        return false;
+    }
+    return true;
+}
+
+// Queues a decode request for |buffer|. |decodeCb| is invoked (asynchronously)
+// with the decode status; requests made in the Error state fail immediately.
+void V4L2Decoder::decode(std::unique_ptr<BitstreamBuffer> buffer, DecodeCB decodeCb) {
+    ALOGV("%s(id=%d)", __func__, buffer->id);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mState == State::Error) {
+        ALOGE("Ignore due to error state.");
+        // Still invoke the callback so the caller is not left waiting.
+        mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(decodeCb),
+                                                          VideoDecoder::DecodeStatus::kError));
+        return;
+    }
+
+    // The first decode request moves the decoder out of Idle.
+    if (mState == State::Idle) {
+        setState(State::Decoding);
+    }
+
+    mDecodeRequests.push(DecodeRequest(std::move(buffer), std::move(decodeCb)));
+    pumpDecodeRequest();
+}
+
+// Requests that all queued input be decoded and all output delivered before
+// |drainCb| runs. A drain is represented in the request queue by a null
+// bitstream buffer.
+void V4L2Decoder::drain(DecodeCB drainCb) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mState == State::Idle) {
+        // Nothing is in flight; report success asynchronously.
+        ALOGD("Nothing need to drain, ignore.");
+        mTaskRunner->PostTask(
+                FROM_HERE, ::base::BindOnce(std::move(drainCb), VideoDecoder::DecodeStatus::kOk));
+    } else if (mState == State::Decoding) {
+        mDecodeRequests.push(DecodeRequest(nullptr, std::move(drainCb)));
+        pumpDecodeRequest();
+    } else {
+        // State::Draining or State::Error: a second drain is not allowed.
+        ALOGE("Ignore due to wrong state: %s", StateToString(mState));
+        mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(drainCb),
+                                                          VideoDecoder::DecodeStatus::kError));
+    }
+}
+
+// Drains the pending request queue: queues bitstream buffers to the V4L2
+// input queue while free buffers exist, and handles drain markers (null
+// buffers). Only runs in the Decoding state.
+void V4L2Decoder::pumpDecodeRequest() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mState != State::Decoding) return;
+
+    while (!mDecodeRequests.empty()) {
+        // Drain the decoder. A null buffer marks a drain request.
+        if (mDecodeRequests.front().buffer == nullptr) {
+            ALOGV("Get drain request.");
+            // Send the flush command after all input buffers are dequeued. This makes
+            // sure all previous resolution changes have been handled because the
+            // driver must hold the input buffer that triggers resolution change. The
+            // driver cannot decode data in it without new output buffers. If we send
+            // the flush now and a queued input buffer triggers resolution change
+            // later, the driver will send an output buffer that has
+            // V4L2_BUF_FLAG_LAST. But some queued input buffer have not been decoded
+            // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
+            // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
+            // to the decoder.
+            if (mInputQueue->QueuedBuffersCount() > 0) {
+                ALOGD("Wait for all input buffers dequeued.");
+                return;
+            }
+
+            auto request = std::move(mDecodeRequests.front());
+            mDecodeRequests.pop();
+
+            // V4L2_DEC_CMD_STOP starts the drain inside the driver.
+            if (!sendV4L2DecoderCmd(false)) {
+                std::move(request.decodeCb).Run(VideoDecoder::DecodeStatus::kError);
+                onError();
+                return;
+            }
+            mDrainCb = std::move(request.decodeCb);
+            setState(State::Draining);
+            return;
+        }
+
+        // Pause if no free input buffer. We resume decoding after dequeueing input buffers.
+        auto inputBuffer = mInputQueue->GetFreeBuffer();
+        if (!inputBuffer) {
+            ALOGV("There is no free input buffer.");
+            return;
+        }
+
+        auto request = std::move(mDecodeRequests.front());
+        mDecodeRequests.pop();
+
+        // The bitstream ID travels through the driver in the timestamp field
+        // and is recovered when the buffer is dequeued.
+        ALOGV("QBUF to input queue, bitstreamId=%d", request.buffer->id);
+        inputBuffer->SetTimeStamp({.tv_sec = request.buffer->id});
+        size_t planeSize = inputBuffer->GetPlaneSize(0);
+        if (request.buffer->size > planeSize) {
+            ALOGE("The input size (%zu) is not enough, we need %zu", planeSize,
+                  request.buffer->size);
+            onError();
+            return;
+        }
+
+        ALOGV("Set bytes_used=%zu, offset=%zu", request.buffer->offset + request.buffer->size,
+              request.buffer->offset);
+        inputBuffer->SetPlaneDataOffset(0, request.buffer->offset);
+        inputBuffer->SetPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
+        std::vector<::base::ScopedFD> fds;
+        fds.push_back(std::move(request.buffer->dmabuf_fd));
+        std::move(*inputBuffer).QueueDMABuf(fds);
+
+        // The callback runs when the buffer is dequeued in serviceDeviceTask().
+        mPendingDecodeCbs.insert(std::make_pair(request.buffer->id, std::move(request.decodeCb)));
+    }
+}
+
+// Aborts all in-flight work: runs every pending decode/drain callback with
+// kAborted, drops all queued V4L2 buffers via streamoff, then restarts the
+// input queue and polling, returning to Idle.
+void V4L2Decoder::flush() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mState == State::Idle) {
+        ALOGD("Nothing need to flush, ignore.");
+        return;
+    }
+    if (mState == State::Error) {
+        ALOGE("Ignore due to error state.");
+        return;
+    }
+
+    // Call all pending callbacks. Running a consumed (moved-from) OnceCallback
+    // is not possible, so |mDrainCb| is implicitly cleared by the move below.
+    for (auto& item : mPendingDecodeCbs) {
+        std::move(item.second).Run(VideoDecoder::DecodeStatus::kAborted);
+    }
+    mPendingDecodeCbs.clear();
+    if (mDrainCb) {
+        std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kAborted);
+    }
+
+    // Streamoff V4L2 queues to drop input and output buffers.
+    // Polling is stopped first — presumably to avoid serviceDeviceTask()
+    // racing with the streamoff; keep this ordering.
+    mDevice->StopPolling();
+    mOutputQueue->Streamoff();
+    mInputQueue->Streamoff();
+
+    // Streamon input queue again.
+    mInputQueue->Streamon();
+    if (!mDevice->StartPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
+                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
+        ALOGE("Failed to start polling V4L2 device.");
+        onError();
+        return;
+    }
+
+    setState(State::Idle);
+}
+
+// Called by the device poller whenever the V4L2 device is ready. Dequeues
+// finished input buffers (running their decode callbacks), dequeues decoded
+// output buffers (delivering frames, detecting end-of-drain), and handles
+// resolution-change events. |event| is true when a V4L2 event is pending.
+void V4L2Decoder::serviceDeviceTask(bool event) {
+    ALOGV("%s(event=%d) state=%s InputQueue:%zu+%zu/%zu, OutputQueue:%zu+%zu/%zu", __func__, event,
+          StateToString(mState), mInputQueue->FreeBuffersCount(), mInputQueue->QueuedBuffersCount(),
+          mInputQueue->AllocatedBuffersCount(), mOutputQueue->FreeBuffersCount(),
+          mOutputQueue->QueuedBuffersCount(), mOutputQueue->AllocatedBuffersCount());
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mState == State::Error) return;
+
+    // Dequeue output and input queue.
+    bool inputDequeued = false;
+    while (mInputQueue->QueuedBuffersCount() > 0) {
+        bool success;
+        media::V4L2ReadableBufferRef dequeuedBuffer;
+        std::tie(success, dequeuedBuffer) = mInputQueue->DequeueBuffer();
+        if (!success) {
+            ALOGE("Failed to dequeue buffer from input queue.");
+            onError();
+            return;
+        }
+        if (!dequeuedBuffer) break;
+
+        inputDequeued = true;
+
+        // Run the corresponding decode callback. The bitstream ID was stashed
+        // in the timestamp field by pumpDecodeRequest().
+        int32_t id = dequeuedBuffer->GetTimeStamp().tv_sec;
+        ALOGV("DQBUF from input queue, bitstreamId=%d", id);
+        auto it = mPendingDecodeCbs.find(id);
+        if (it == mPendingDecodeCbs.end()) {
+            ALOGW("Callback is already abandoned.");
+            continue;
+        }
+        std::move(it->second).Run(VideoDecoder::DecodeStatus::kOk);
+        mPendingDecodeCbs.erase(it);
+    }
+
+    bool outputDequeued = false;
+    while (mOutputQueue->QueuedBuffersCount() > 0) {
+        bool success;
+        media::V4L2ReadableBufferRef dequeuedBuffer;
+        std::tie(success, dequeuedBuffer) = mOutputQueue->DequeueBuffer();
+        if (!success) {
+            ALOGE("Failed to dequeue buffer from output queue.");
+            onError();
+            return;
+        }
+        if (!dequeuedBuffer) break;
+
+        outputDequeued = true;
+
+        ALOGV("DQBUF from output queue, bufferId=%zu, corresponding bitstreamId=%d, bytesused=%zu",
+              dequeuedBuffer->BufferId(),
+              static_cast<int32_t>(dequeuedBuffer->GetTimeStamp().tv_sec),
+              dequeuedBuffer->GetPlaneBytesUsed(0));
+        // An empty payload (e.g. the V4L2_BUF_FLAG_LAST marker) carries no frame.
+        if (dequeuedBuffer->GetPlaneBytesUsed(0) > 0) {
+            sendOutputBuffer(dequeuedBuffer);
+        }
+        // The LAST-flagged buffer completes a drain; restart the decoder with
+        // V4L2_DEC_CMD_START and report success.
+        if (mDrainCb && dequeuedBuffer->IsLast()) {
+            ALOGD("All buffers are drained.");
+            sendV4L2DecoderCmd(true);
+            std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kOk);
+            setState(State::Idle);
+        }
+    }
+
+    // Handle resolution change event.
+    if (event && dequeueResolutionChangeEvent()) {
+        if (!changeResolution()) {
+            onError();
+            return;
+        }
+    }
+
+    // We freed some input buffers, continue handling decode requests.
+    if (inputDequeued) {
+        mTaskRunner->PostTask(FROM_HERE,
+                              ::base::BindOnce(&V4L2Decoder::pumpDecodeRequest, mWeakThis));
+    }
+    // We free some output buffers, try to get VideoFrame.
+    if (outputDequeued) {
+        mTaskRunner->PostTask(FROM_HERE,
+                              ::base::BindOnce(&V4L2Decoder::tryFetchVideoFrame, mWeakThis));
+    }
+}
+
+// Delivers the decoded frame backing the dequeued V4L2 buffer to the client,
+// annotated with its bitstream ID and the current visible rectangle.
+void V4L2Decoder::sendOutputBuffer(media::V4L2ReadableBufferRef buffer) {
+    ALOGV("%s(bufferId=%zu)", __func__, buffer->BufferId());
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    const size_t bufferId = buffer->BufferId();
+    auto it = mFrameAtDevice.find(bufferId);
+    ALOG_ASSERT(it != mFrameAtDevice.end(), "buffer %zu is not found at mFrameAtDevice", bufferId);
+
+    auto frame = std::move(it->second);
+    mFrameAtDevice.erase(it);
+
+    frame->setBitstreamId(buffer->GetTimeStamp().tv_sec);
+    frame->setVisibleRect(mVisibleRect);
+    mOutputCb.Run(std::move(frame));
+}
+
+// Drains pending V4L2 events and returns true if any of them signals a
+// source resolution change.
+bool V4L2Decoder::dequeueResolutionChangeEvent() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    struct v4l2_event ev = {};
+    while (mDevice->Ioctl(VIDIOC_DQEVENT, &ev) == 0) {
+        const bool resolutionChanged =
+                ev.type == V4L2_EVENT_SOURCE_CHANGE &&
+                (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION);
+        if (resolutionChanged) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Handles a resolution change: queries the new coded size, visible rect and
+// buffer count from the driver, reallocates the output queue, acquires a new
+// frame pool, and resumes fetching output frames. Returns false on failure.
+bool V4L2Decoder::changeResolution() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    std::optional<struct v4l2_format> format = getFormatInfo();
+    std::optional<size_t> numOutputBuffers = getNumOutputBuffers();
+    if (!format || !numOutputBuffers) {
+        return false;
+    }
+
+    mCodedSize.SetSize(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
+    mVisibleRect = getVisibleRect(mCodedSize);
+
+    ALOGI("Need %zu output buffers. coded size: %s, visible rect: %s", *numOutputBuffers,
+          mCodedSize.ToString().c_str(), mVisibleRect.ToString().c_str());
+    if (mCodedSize.IsEmpty()) {
+        ALOGE("Failed to get resolution from V4L2 driver.");
+        return false;
+    }
+
+    // The old buffers must be released before allocating the new set.
+    mOutputQueue->Streamoff();
+    mOutputQueue->DeallocateBuffers();
+
+    if (mOutputQueue->AllocateBuffers(*numOutputBuffers, V4L2_MEMORY_DMABUF) == 0) {
+        ALOGE("Failed to allocate output buffer.");
+        return false;
+    }
+    if (!mOutputQueue->Streamon()) {
+        ALOGE("Failed to streamon output queue.");
+        return false;
+    }
+
+    // Always use the flexible 420 format YCBCR_420_888 in Android.
+    mGetPoolCb.Run(&mVideoFramePool, mCodedSize, HalPixelFormat::YCBCR_420_888);
+    if (!mVideoFramePool) {
+        ALOGE("Failed to get block pool with size: %s", mCodedSize.ToString().c_str());
+        return false;
+    }
+
+    tryFetchVideoFrame();
+    return true;
+}
+
+// Requests one frame from the pool when an output V4L2 buffer is free.
+// Only one pool request is kept in flight; onVideoFrameReady() calls back
+// into this method to continue the chain.
+void V4L2Decoder::tryFetchVideoFrame() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(mVideoFramePool, "mVideoFramePool is null, haven't get the instance yet?");
+
+    // No output frames are needed while idle.
+    if (mState == State::Idle) return;
+
+    if (mVideoFramePool->hasPendingRequests()) {
+        ALOGD("Previous callback is running, ignore.");
+        return;
+    }
+
+    auto freeBuffer = mOutputQueue->GetFreeBuffer();
+    if (!freeBuffer) {
+        ALOGD("No free output buffer.");
+        return;
+    }
+
+    mVideoFramePool->getVideoFrame(
+            ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis, std::move(*freeBuffer)));
+}
+
+// Pool callback: queues the fetched frame's dmabufs to the output queue and
+// records which VideoFrame backs the V4L2 buffer until it is dequeued again.
+// A null |frame| means the pool failed and puts the decoder in Error state.
+void V4L2Decoder::onVideoFrameReady(media::V4L2WritableBufferRef outputBuffer,
+                                    std::unique_ptr<VideoFrame> frame) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    if (!frame) {
+        ALOGE("Get nullptr VideoFrame.");
+        onError();
+        return;
+    }
+
+    size_t bufferId = outputBuffer.BufferId();
+    // This is routine progress logging, not an error: use ALOGV, not ALOGE.
+    ALOGV("QBUF to output queue, bufferId=%zu", bufferId);
+    std::move(outputBuffer).QueueDMABuf(frame->getFDs());
+    mFrameAtDevice.insert(std::make_pair(bufferId, std::move(frame)));
+
+    // Keep fetching while free output buffers remain.
+    tryFetchVideoFrame();
+}
+
+// Queries the driver's minimum CAPTURE buffer count and adds a few extra
+// buffers for pipeline headroom. Returns std::nullopt on ioctl failure.
+std::optional<size_t> V4L2Decoder::getNumOutputBuffers() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    struct v4l2_control ctrl = {};
+    ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
+    if (mDevice->Ioctl(VIDIOC_G_CTRL, &ctrl) != 0) {
+        ALOGE("ioctl() failed: VIDIOC_G_CTRL");
+        return std::nullopt;
+    }
+    ALOGV("%s() V4L2_CID_MIN_BUFFERS_FOR_CAPTURE returns %u", __func__, ctrl.value);
+
+    return ctrl.value + kNumExtraOutputBuffers;
+}
+
+// Fetches the current CAPTURE (decoded output) format from the driver.
+// Returns std::nullopt on ioctl failure.
+std::optional<struct v4l2_format> V4L2Decoder::getFormatInfo() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    struct v4l2_format format = {};
+    format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    if (mDevice->Ioctl(VIDIOC_G_FMT, &format) != 0) {
+        ALOGE("ioctl() failed: VIDIOC_G_FMT");
+        return std::nullopt;
+    }
+
+    return format;
+}
+
+// Queries the visible (displayed) rectangle of the decoded frames, preferring
+// VIDIOC_G_SELECTION and falling back to VIDIOC_G_CROP. Falls back to the
+// full coded size whenever the driver's answer is unavailable or implausible.
+media::Rect V4L2Decoder::getVisibleRect(const media::Size& codedSize) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    struct v4l2_rect* visible_rect = nullptr;
+    struct v4l2_selection selection_arg;
+    memset(&selection_arg, 0, sizeof(selection_arg));
+    selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    selection_arg.target = V4L2_SEL_TGT_COMPOSE;
+
+    // NOTE(review): G_SELECTION uses the non-MPLANE CAPTURE type while the
+    // G_CROP fallback below uses CAPTURE_MPLANE — confirm both match what the
+    // target drivers expect.
+    if (mDevice->Ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
+        ALOGV("VIDIOC_G_SELECTION is supported");
+        visible_rect = &selection_arg.r;
+    } else {
+        ALOGV("Fallback to VIDIOC_G_CROP");
+        struct v4l2_crop crop_arg;
+        memset(&crop_arg, 0, sizeof(crop_arg));
+        crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+        if (mDevice->Ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
+            ALOGW("ioctl() VIDIOC_G_CROP failed");
+            return media::Rect(codedSize);
+        }
+        visible_rect = &crop_arg.c;
+    }
+
+    media::Rect rect(visible_rect->left, visible_rect->top, visible_rect->width,
+                     visible_rect->height);
+    ALOGD("visible rectangle is %s", rect.ToString().c_str());
+    // Sanity-check the driver's answer against the coded size.
+    if (!media::Rect(codedSize).Contains(rect)) {
+        ALOGW("visible rectangle %s is not inside coded size %s", rect.ToString().c_str(),
+              codedSize.ToString().c_str());
+        return media::Rect(codedSize);
+    }
+    if (rect.IsEmpty()) {
+        ALOGW("visible size is empty");
+        return media::Rect(codedSize);
+    }
+
+    return rect;
+}
+
+// Issues VIDIOC_DECODER_CMD: V4L2_DEC_CMD_START when |start| is true (resume
+// after a drain), V4L2_DEC_CMD_STOP otherwise (begin draining). Returns false
+// if the ioctl fails.
+bool V4L2Decoder::sendV4L2DecoderCmd(bool start) {
+    ALOGV("%s(start=%d)", __func__, start);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    struct v4l2_decoder_cmd cmd = {};
+    cmd.cmd = start ? V4L2_DEC_CMD_START : V4L2_DEC_CMD_STOP;
+    if (mDevice->Ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
+        ALOGE("ioctl() VIDIOC_DECODER_CMD failed: start=%d", start);
+        return false;
+    }
+
+    return true;
+}
+
+// Enters the terminal Error state and notifies the client via |mErrorCb|.
+void V4L2Decoder::onError() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    setState(State::Error);
+    mErrorCb.Run();
+}
+
+void V4L2Decoder::setState(State newState) {
+    ALOGV("%s(%s)", __func__, StateToString(newState));
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mState == newState) return;
+    if (mState == State::Error) {
+        ALOGV("Already in Error state.");
+        return;
+    }
+
+    switch (newState) {
+    case State::Idle:
+        break;
+    case State::Decoding:
+        break;
+    case State::Draining:
+        if (mState != State::Decoding) newState = State::Error;
+        break;
+    case State::Error:
+        break;
+    }
+
+    ALOGI("Set state %s => %s", StateToString(mState), StateToString(newState));
+    mState = newState;
+}
+
+// static
+// Maps a decoder state to a human-readable name for logging.
+const char* V4L2Decoder::StateToString(State state) {
+    switch (state) {
+    case State::Idle:
+        return "Idle";
+    case State::Decoding:
+        return "Decoding";
+    case State::Draining:
+        return "Draining";
+    case State::Error:
+        return "Error";
+    }
+    // Unreachable for valid enumerators. Prevents falling off the end of a
+    // non-void function (undefined behavior) on an out-of-range value.
+    return "Unknown";
+}
+
+}  // namespace android
diff --git a/components/VideoDecoder.cpp b/components/VideoDecoder.cpp
new file mode 100644
index 0000000..6e48169
--- /dev/null
+++ b/components/VideoDecoder.cpp
@@ -0,0 +1,23 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <v4l2_codec2/components/VideoDecoder.h>
+
+namespace android {
+
+// static
+// Maps a decode status to a human-readable name for logging.
+const char* VideoDecoder::DecodeStatusToString(VideoDecoder::DecodeStatus status) {
+    switch (status) {
+    case VideoDecoder::DecodeStatus::kOk:
+        return "OK";
+    case VideoDecoder::DecodeStatus::kAborted:
+        return "ABORTED";
+    case VideoDecoder::DecodeStatus::kError:
+        return "ERROR";
+    }
+    // Unreachable for valid enumerators. Prevents falling off the end of a
+    // non-void function (undefined behavior) on an out-of-range value.
+    return "UNKNOWN";
+}
+
+VideoDecoder::~VideoDecoder() = default;
+
+}  // namespace android
diff --git a/components/VideoFrame.cpp b/components/VideoFrame.cpp
new file mode 100644
index 0000000..bcdb283
--- /dev/null
+++ b/components/VideoFrame.cpp
@@ -0,0 +1,61 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoFrame"
+
+#include <v4l2_codec2/components/VideoFrame.h>
+
+#include <C2AllocatorGralloc.h>
+#include <log/log.h>
+
+namespace android {
+
+// static
+// Wraps |block| in a VideoFrame. Every FD of the block's native handle is
+// dup()ed so the frame owns an independent set of file descriptors. Returns
+// nullptr if |block| is null or any dup() fails.
+std::unique_ptr<VideoFrame> VideoFrame::Create(std::shared_ptr<C2GraphicBlock> block) {
+    if (!block) return nullptr;
+
+    const C2Handle* const handle = block->handle();
+    std::vector<::base::ScopedFD> fds;
+    for (int i = 0; i < handle->numFds; i++) {
+        ::base::ScopedFD fd(dup(handle->data[i]));
+        if (!fd.is_valid()) {
+            ALOGE("Failed to dup(%d), errno=%d", handle->data[i], errno);
+            return nullptr;
+        }
+        fds.push_back(std::move(fd));
+    }
+
+    return std::unique_ptr<VideoFrame>(new VideoFrame(std::move(block), std::move(fds)));
+}
+
+// Constructed via Create(), which duplicates and validates the FDs.
+VideoFrame::VideoFrame(std::shared_ptr<C2GraphicBlock> block, std::vector<::base::ScopedFD> fds)
+      : mGraphicBlock(std::move(block)), mFds(std::move(fds)) {}
+
+VideoFrame::~VideoFrame() = default;
+
+// Returns the duplicated FDs that back this frame's buffer.
+const std::vector<::base::ScopedFD>& VideoFrame::getFDs() const {
+    return mFds;
+}
+
+// Records the visible (displayed) region of the frame.
+void VideoFrame::setVisibleRect(const media::Rect& visibleRect) {
+    mVisibleRect = visibleRect;
+}
+
+const media::Rect& VideoFrame::getVisibleRect() const {
+    return mVisibleRect;
+}
+
+// Associates the frame with the ID of the bitstream buffer it was decoded from.
+void VideoFrame::setBitstreamId(int32_t bitstreamId) {
+    mBitstreamId = bitstreamId;
+}
+
+int32_t VideoFrame::getBitstreamId() const {
+    return mBitstreamId;
+}
+
+// Shares the underlying C2GraphicBlock, cropped to the visible rectangle.
+C2ConstGraphicBlock VideoFrame::getGraphicBlock() {
+    return mGraphicBlock->share(C2Rect(mVisibleRect.width(), mVisibleRect.height()), C2Fence());
+}
+
+}  // namespace android
diff --git a/components/VideoFramePool.cpp b/components/VideoFramePool.cpp
new file mode 100644
index 0000000..c2f68fc
--- /dev/null
+++ b/components/VideoFramePool.cpp
@@ -0,0 +1,105 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoFramePool"
+
+#include <v4l2_codec2/components/VideoFramePool.h>
+
+#include <stdint.h>
+#include <memory>
+
+#include <android/hardware/graphics/common/1.0/types.h>
+#include <base/bind.h>
+#include <base/time/time.h>
+#include <log/log.h>
+
+#include <v4l2_codec2/components/VideoTypes.h>
+
+using android::hardware::graphics::common::V1_0::BufferUsage;
+
+namespace android {
+namespace {
+// Maximum number of retries when fetchGraphicBlock() reports the pool is
+// temporarily out of buffers (C2_TIMED_OUT / C2_BLOCKING).
+constexpr size_t kAllocateBufferMaxRetries = 32;
+// Delay between two consecutive fetch attempts, in microseconds.
+constexpr size_t kFetchRetryDelayUs = 500;
+}  // namespace
+
+VideoFramePool::VideoFramePool(std::shared_ptr<C2BlockPool> blockPool, const media::Size& size,
+                               HalPixelFormat pixelFormat, bool isSecure,
+                               scoped_refptr<::base::SequencedTaskRunner> taskRunner)
+      : mBlockPool(std::move(blockPool)),
+        mSize(size),
+        mPixelFormat(pixelFormat),
+        // Secure playback uses protected buffers the CPU must not read.
+        mMemoryUsage(isSecure ? C2MemoryUsage::READ_PROTECTED : C2MemoryUsage::CPU_READ,
+                     static_cast<uint64_t>(BufferUsage::VIDEO_DECODER)),
+        mTaskRunner(std::move(taskRunner)) {
+    ALOGV("%s()", __func__);
+    // The pool must be constructed on the same sequence it is used on.
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+    DCHECK(mBlockPool);
+    DCHECK(mTaskRunner);
+
+    mWeakThis = mWeakThisFactory.GetWeakPtr();
+}
+
+VideoFramePool::~VideoFramePool() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    // Invalidate weak pointers so no posted tryFetchGraphicBlock() task runs
+    // against a destroyed pool.
+    mWeakThisFactory.InvalidateWeakPtrs();
+}
+
+// Requests a frame asynchronously; |cb| runs on this sequence once a buffer
+// is available (or with nullptr on failure). Requests are served in FIFO order.
+void VideoFramePool::getVideoFrame(GetVideoFrameCB cb) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    // Start a fetch only if none was already in flight; an ongoing fetch chain
+    // will also service the callback queued here.
+    const bool fetchInProgress = !mCbQueue.empty();
+    mCbQueue.push(std::move(cb));
+    if (!fetchInProgress) {
+        tryFetchGraphicBlock();
+    }
+}
+
+// Returns true while at least one getVideoFrame() request is still waiting.
+bool VideoFramePool::hasPendingRequests() const {
+    return !mCbQueue.empty();
+}
+
+// Attempts to fetch one graphic block from the C2 pool. On a transient
+// failure (C2_TIMED_OUT / C2_BLOCKING) retries after a short delay, up to
+// kAllocateBufferMaxRetries times; otherwise delivers the frame (or nullptr
+// on hard failure) to the oldest waiting callback.
+void VideoFramePool::tryFetchGraphicBlock() {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+
+    if (mCbQueue.empty()) return;
+
+    std::shared_ptr<C2GraphicBlock> block;
+    auto err = mBlockPool->fetchGraphicBlock(mSize.width(), mSize.height(),
+                                             static_cast<uint32_t>(mPixelFormat), mMemoryUsage,
+                                             &block);
+
+    if ((err == C2_TIMED_OUT || err == C2_BLOCKING) && mNumRetries++ < kAllocateBufferMaxRetries) {
+        ALOGD("fetchGraphicBlock() timeout. retry %zu times", mNumRetries);
+        mTaskRunner->PostDelayedTask(
+                FROM_HERE, ::base::BindOnce(&VideoFramePool::tryFetchGraphicBlock, mWeakThis),
+                ::base::TimeDelta::FromMicroseconds(kFetchRetryDelayUs));
+    } else if (err != C2_OK) {
+        ALOGE("Failed to fetch block, err=%d, retry %zu times", err, mNumRetries);
+        // Reset the retry budget so a later request retries from scratch
+        // instead of failing immediately with an exhausted counter.
+        mNumRetries = 0;
+        sendVideoFrame(nullptr);
+    } else {
+        mNumRetries = 0;
+        sendVideoFrame(VideoFrame::Create(std::move(block)));
+
+        // More callbacks may be waiting; keep fetching asynchronously.
+        if (!mCbQueue.empty()) {
+            mTaskRunner->PostTask(
+                    FROM_HERE, ::base::BindOnce(&VideoFramePool::tryFetchGraphicBlock, mWeakThis));
+        }
+    }
+}
+
+// Pops the oldest waiting callback and delivers |frame| to it (nullptr on
+// failure). Must only be called while a callback is queued.
+void VideoFramePool::sendVideoFrame(std::unique_ptr<VideoFrame> frame) {
+    ALOGV("%s()", __func__);
+    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
+    ALOG_ASSERT(!mCbQueue.empty());
+
+    GetVideoFrameCB cb = std::move(mCbQueue.front());
+    mCbQueue.pop();
+    std::move(cb).Run(std::move(frame));
+}
+
+}  // namespace android
diff --git a/components/VideoTypes.cpp b/components/VideoTypes.cpp
new file mode 100644
index 0000000..9b71db8
--- /dev/null
+++ b/components/VideoTypes.cpp
@@ -0,0 +1,36 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoTypes"
+
+#include <v4l2_codec2/components/VideoTypes.h>
+
+#include <log/log.h>
+
+namespace android {
+
+// Maps a VideoCodec to a human-readable name for logging.
+const char* VideoCodecToString(VideoCodec codec) {
+    switch (codec) {
+    case VideoCodec::H264:
+        return "H264";
+    case VideoCodec::VP8:
+        return "VP8";
+    case VideoCodec::VP9:
+        return "VP9";
+    }
+    // Unreachable for valid enumerators. Prevents falling off the end of a
+    // non-void function (undefined behavior) on an out-of-range value.
+    return "Unknown";
+}
+
+// Maps a HalPixelFormat to a human-readable name for logging.
+const char* HalPixelFormatToString(HalPixelFormat format) {
+    switch (format) {
+    case HalPixelFormat::YCBCR_420_888:
+        return "YCBCR_420_888";
+    case HalPixelFormat::YV12:
+        return "YV12";
+    case HalPixelFormat::NV12:
+        return "NV12";
+    }
+    // Unreachable for valid enumerators. Prevents falling off the end of a
+    // non-void function (undefined behavior) on an out-of-range value.
+    return "Unknown";
+}
+
+}  // namespace android
diff --git a/components/include/v4l2_codec2/components/V4L2DecodeComponent.h b/components/include/v4l2_codec2/components/V4L2DecodeComponent.h
new file mode 100644
index 0000000..0def53c
--- /dev/null
+++ b/components/include/v4l2_codec2/components/V4L2DecodeComponent.h
@@ -0,0 +1,147 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_COMPONENT_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_COMPONENT_H
+
+#include <memory>
+
+#include <C2Component.h>
+#include <C2ComponentFactory.h>
+#include <C2Work.h>
+#include <base/memory/scoped_refptr.h>
+#include <base/memory/weak_ptr.h>
+#include <base/sequenced_task_runner.h>
+#include <base/synchronization/waitable_event.h>
+#include <base/threading/thread.h>
+
+#include <v4l2_codec2/components/V4L2DecodeInterface.h>
+#include <v4l2_codec2/components/VideoDecoder.h>
+#include <v4l2_codec2/components/VideoFramePool.h>
+#include <v4l2_device.h>
+
+namespace android {
+
+// C2Component implementation that delegates decoding to a VideoDecoder
+// (V4L2Decoder). Public C2Component entry points post *Task() methods to
+// |mDecoderTaskRunner|; results are reported back through |mListener|.
+class V4L2DecodeComponent : public C2Component,
+                            public std::enable_shared_from_this<V4L2DecodeComponent> {
+public:
+    // Creates the component instance; |deleter| is used by the returned
+    // shared_ptr to destroy it.
+    static std::shared_ptr<C2Component> create(const std::string& name, c2_node_id_t id,
+                                               const std::shared_ptr<C2ReflectorHelper>& helper,
+                                               C2ComponentFactory::ComponentDeleter deleter);
+    V4L2DecodeComponent(const std::string& name, c2_node_id_t id,
+                        const std::shared_ptr<C2ReflectorHelper>& helper,
+                        const std::shared_ptr<V4L2DecodeInterface>& intfImpl);
+    ~V4L2DecodeComponent() override;
+
+    // Implementation of C2Component.
+    c2_status_t start() override;
+    c2_status_t stop() override;
+    c2_status_t reset() override;
+    c2_status_t release() override;
+    c2_status_t setListener_vb(const std::shared_ptr<Listener>& listener,
+                               c2_blocking_t mayBlock) override;
+    c2_status_t queue_nb(std::list<std::unique_ptr<C2Work>>* const items) override;
+    c2_status_t announce_nb(const std::vector<C2WorkOutline>& items) override;
+    c2_status_t flush_sm(flush_mode_t mode,
+                         std::list<std::unique_ptr<C2Work>>* const flushedWork) override;
+    c2_status_t drain_nb(drain_mode_t mode) override;
+    std::shared_ptr<C2ComponentInterface> intf() override;
+
+private:
+    // The C2Component state machine.
+    enum class ComponentState {
+        STOPPED,
+        RUNNING,
+        RELEASED,
+        ERROR,
+    };
+    static const char* ComponentStateToString(ComponentState state);
+
+    // Handle C2Component's public methods on |mDecoderTaskRunner|.
+    void destroyTask();
+    void startTask(c2_status_t* status);
+    void stopTask();
+    void queueTask(std::unique_ptr<C2Work> work);
+    void flushTask();
+    void drainTask();
+    void setListenerTask(const std::shared_ptr<Listener>& listener, ::base::WaitableEvent* done);
+
+    // Try to process pending works at |mPendingWorks|. Paused when |mIsDraining| is set.
+    void pumpPendingWorks();
+    // Get the buffer pool.
+    void getVideoFramePool(std::unique_ptr<VideoFramePool>* pool, const media::Size& size,
+                           HalPixelFormat pixelFormat);
+    // Detect and report works with no-show frame, only used at VP8 and VP9.
+    void detectNoShowFrameWorksAndReportIfFinished(const C2WorkOrdinalStruct& currOrdinal);
+
+    // Finish callbacks of each method.
+    void onOutputFrameReady(std::unique_ptr<VideoFrame> frame);
+    void onDecodeDone(int32_t bitstreamId, VideoDecoder::DecodeStatus status);
+    void onDrainDone(VideoDecoder::DecodeStatus status);
+    void onFlushDone();
+
+    // Try to process decoding works at |mPendingWorks|.
+    void pumpReportWork();
+    // Report finished work.
+    bool reportWorkIfFinished(int32_t bitstreamId);
+    bool reportEOSWork();
+    void reportAbandonedWorks();
+    bool reportWork(std::unique_ptr<C2Work> work);
+    // Report error when any error occurs.
+    void reportError(c2_status_t error);
+
+    // The pointer of component interface implementation.
+    std::shared_ptr<V4L2DecodeInterface> mIntfImpl;
+    // The pointer of component interface.
+    const std::shared_ptr<C2ComponentInterface> mIntf;
+    // The pointer of component listener.
+    std::shared_ptr<Listener> mListener;
+
+    // The video decoder that performs the actual decoding work.
+    std::unique_ptr<VideoDecoder> mDecoder;
+    // The queue of works that haven't processed and sent to |mDecoder|.
+    std::queue<std::unique_ptr<C2Work>> mPendingWorks;
+    // The works whose input buffers are sent to |mDecoder|. The key is the
+    // bitstream ID of work's input buffer.
+    std::map<int32_t, std::unique_ptr<C2Work>> mWorksAtDecoder;
+    // The bitstream ID of the works that output frames have been returned from |mDecoder|.
+    // The order is display order.
+    std::queue<int32_t> mOutputBitstreamIds;
+
+    // Set to true when decoding the protected playback.
+    bool mIsSecure = false;
+    // The component state.
+    std::atomic<ComponentState> mComponentState{ComponentState::STOPPED};
+    // Whether we are currently draining the component. This is set when the component is processing
+    // the drain request, and unset either after reportEOSWork() (EOS is outputted), or
+    // reportAbandonedWorks() (drain is cancelled and works are abandoned).
+    bool mIsDraining = false;
+
+    // The mutex lock to synchronize start/stop/reset/release calls.
+    std::mutex mStartStopLock;
+    ::base::WaitableEvent mStartStopDone;
+
+    // The color aspects parameter for current decoded output buffers.
+    std::shared_ptr<C2StreamColorAspectsInfo::output> mCurrentColorAspects;
+    // The flag of pending color aspects change. This should be set once we have parsed color
+    // aspects from bitstream by parseCodedColorAspects(), at the same time recorded input frame
+    // index into |mPendingColorAspectsChangeFrameIndex|.
+    // When this flag is true and the corresponding frame index is not less than
+    // |mPendingColorAspectsChangeFrameIndex| for the output buffer in onOutputBufferDone(),
+    // update |mCurrentColorAspects| from component interface and reset the flag.
+    bool mPendingColorAspectsChange = false;
+    // The record of frame index to update color aspects. Details as above.
+    // Default-initialized to avoid reading an indeterminate value before the
+    // first color aspects change is recorded.
+    uint64_t mPendingColorAspectsChangeFrameIndex = 0;
+
+    // The device task runner and its sequence checker. We should interact with
+    // |mDevice| on this.
+    ::base::Thread mDecoderThread{"V4L2DecodeComponentDecoderThread"};
+    scoped_refptr<::base::SequencedTaskRunner> mDecoderTaskRunner;
+
+    // NOTE: keep the factory last so outstanding weak pointers are invalidated
+    // before the other members are destroyed.
+    ::base::WeakPtrFactory<V4L2DecodeComponent> mWeakThisFactory{this};
+    ::base::WeakPtr<V4L2DecodeComponent> mWeakThis;
+};
+
+}  // namespace android
+
+#endif  // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_COMPONENT_H
diff --git a/components/include/v4l2_codec2/components/V4L2DecodeInterface.h b/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
new file mode 100644
index 0000000..0abf4ec
--- /dev/null
+++ b/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
@@ -0,0 +1,99 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_INTERFACE_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_INTERFACE_H
+
+#include <memory>
+#include <string>
+
+#include <C2Config.h>
+#include <util/C2InterfaceHelper.h>
+
+#include <size.h>
+#include <v4l2_codec2/components/VideoTypes.h>
+
+namespace android {
+
+class V4L2DecodeInterface : public C2InterfaceHelper {
+public:
+    V4L2DecodeInterface(const std::string& name, const std::shared_ptr<C2ReflectorHelper>& helper);
+    V4L2DecodeInterface(const V4L2DecodeInterface&) = delete;
+    V4L2DecodeInterface& operator=(const V4L2DecodeInterface&) = delete;
+    ~V4L2DecodeInterface() = default;
+
+    // Interfaces for the client component to query the configured parameters.
+    c2_status_t status() const { return mInitStatus; }  // Result of parameter initialization.
+    C2BlockPool::local_id_t getBlockPoolId() const { return mOutputBlockPoolIds->m.values[0]; }
+    std::optional<VideoCodec> getVideoCodec() const { return mVideoCodec; }
+    media::Size getMaxSize() const { return mMaxSize; }
+    media::Size getMinSize() const { return mMinSize; }
+
+    size_t getInputBufferSize() const;
+    c2_status_t queryColorAspects(
+            std::shared_ptr<C2StreamColorAspectsInfo::output>* targetColorAspects);
+
+private:
+    // Static setters that validate values requested for the configurable parameters.
+    static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input>& info);
+    static C2R SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::output>& videoSize);
+    static C2R MaxInputBufferSizeCalculator(bool mayBlock,
+                                            C2P<C2StreamMaxBufferSizeInfo::input>& me,
+                                            const C2P<C2StreamPictureSizeInfo::output>& size);
+
+    template <typename T>
+    static C2R DefaultColorAspectsSetter(bool mayBlock, C2P<T>& def);
+
+    static C2R MergedColorAspectsSetter(bool mayBlock,
+                                        C2P<C2StreamColorAspectsInfo::output>& merged,
+                                        const C2P<C2StreamColorAspectsTuning::output>& def,
+                                        const C2P<C2StreamColorAspectsInfo::input>& coded);
+
+    // The input format kind; should be C2FormatCompressed.
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    // The memory usage flag of input buffer; should be BufferUsage::VIDEO_DECODER.
+    std::shared_ptr<C2StreamUsageTuning::input> mInputMemoryUsage;
+    // The output format kind; should be C2FormatVideo.
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    // The MIME type of input port.
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    // The MIME type of output port; should be MEDIA_MIMETYPE_VIDEO_RAW.
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
+    // The input codec profile and level. For now configuring this parameter is useless since
+    // the component always uses fixed codec profile to initialize accelerator. It is only used
+    // for the client to query supported profile and level values.
+    // TODO: use configured profile/level to initialize accelerator.
+    std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
+    // Decoded video size for output.
+    std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
+    // Maximum size of one input buffer.
+    std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mMaxInputSize;
+    // The suggested usage of input buffer allocator ID.
+    std::shared_ptr<C2PortAllocatorsTuning::input> mInputAllocatorIds;
+    // The suggested usage of output buffer allocator ID.
+    std::shared_ptr<C2PortAllocatorsTuning::output> mOutputAllocatorIds;
+    // The suggested usage of output buffer allocator ID with surface.
+    std::shared_ptr<C2PortSurfaceAllocatorTuning::output> mOutputSurfaceAllocatorId;
+    // Component uses this ID to fetch corresponding output block pool from platform.
+    std::shared_ptr<C2PortBlockPoolsTuning::output> mOutputBlockPoolIds;
+    // The color aspects parsed from input bitstream. This parameter should be configured by
+    // component while decoding.
+    std::shared_ptr<C2StreamColorAspectsInfo::input> mCodedColorAspects;
+    // The default color aspects specified by requested output format. This parameter should be
+    // configured by client.
+    std::shared_ptr<C2StreamColorAspectsTuning::output> mDefaultColorAspects;
+    // The combined color aspects by |mCodedColorAspects| and |mDefaultColorAspects|, and the
+    // former has higher priority. This parameter is used for component to provide color aspects
+    // as C2Info in decoded output buffers.
+    std::shared_ptr<C2StreamColorAspectsInfo::output> mColorAspects;
+
+    c2_status_t mInitStatus;  // Initialization status; exposed via status().
+    std::optional<VideoCodec> mVideoCodec;  // Video codec of the component; see getVideoCodec().
+    media::Size mMinSize;  // Minimum picture size; exposed via getMinSize().
+    media::Size mMaxSize;  // Maximum picture size; exposed via getMaxSize().
+};
+
+}  // namespace android
+
+#endif  // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_INTERFACE_H
diff --git a/components/include/v4l2_codec2/components/V4L2Decoder.h b/components/include/v4l2_codec2/components/V4L2Decoder.h
new file mode 100644
index 0000000..9cf0532
--- /dev/null
+++ b/components/include/v4l2_codec2/components/V4L2Decoder.h
@@ -0,0 +1,111 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODER_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODER_H
+
+#include <stdint.h>
+
+#include <memory>
+#include <optional>
+
+#include <base/callback.h>
+#include <base/files/scoped_file.h>
+#include <base/memory/weak_ptr.h>
+
+#include <rect.h>
+#include <size.h>
+#include <v4l2_codec2/components/VideoDecoder.h>
+#include <v4l2_codec2/components/VideoFrame.h>
+#include <v4l2_codec2/components/VideoFramePool.h>
+#include <v4l2_codec2/components/VideoTypes.h>
+#include <v4l2_device.h>
+
+namespace android {
+
+class V4L2Decoder : public VideoDecoder {
+public:
+    static std::unique_ptr<VideoDecoder> Create(
+            const VideoCodec& codec, const size_t inputBufferSize, GetPoolCB getPoolCB,
+            OutputCB outputCb, ErrorCB errorCb,
+            scoped_refptr<::base::SequencedTaskRunner> taskRunner);
+    ~V4L2Decoder() override;
+
+    void decode(std::unique_ptr<BitstreamBuffer> buffer, DecodeCB decodeCb) override;
+    void drain(DecodeCB drainCb) override;
+    void flush() override;
+
+private:
+    enum class State {
+        Idle,  // Not received any decode buffer after initialized, flushed, or drained.
+        Decoding,
+        Draining,
+        Error,
+    };
+    static const char* StateToString(State state);
+
+    // One queued decode (or drain) request, pairing the input with its callback.
+    struct DecodeRequest {
+        DecodeRequest(std::unique_ptr<BitstreamBuffer> buffer, DecodeCB decodeCb)
+              : buffer(std::move(buffer)), decodeCb(std::move(decodeCb)) {}
+        DecodeRequest(DecodeRequest&&) = default;
+        ~DecodeRequest() = default;
+        DecodeRequest& operator=(DecodeRequest&&);  // Declared only; defined outside this header.
+
+        std::unique_ptr<BitstreamBuffer> buffer;  // nullptr means Drain
+        DecodeCB decodeCb;  // Completion callback for this request.
+    };
+
+    V4L2Decoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner);
+    bool start(const VideoCodec& codec, const size_t inputBufferSize, GetPoolCB getPoolCb,
+               OutputCB outputCb, ErrorCB errorCb);
+    bool setupInputFormat(const uint32_t inputPixelFormat, const size_t inputBufferSize);
+    void pumpDecodeRequest();
+
+    void serviceDeviceTask(bool event);
+    void sendOutputBuffer(media::V4L2ReadableBufferRef buffer);
+    bool dequeueResolutionChangeEvent();
+    bool changeResolution();
+
+    void tryFetchVideoFrame();
+    void onVideoFrameReady(media::V4L2WritableBufferRef outputBuffer,
+                           std::unique_ptr<VideoFrame> block);
+
+    std::optional<size_t> getNumOutputBuffers();
+    std::optional<struct v4l2_format> getFormatInfo();
+    media::Rect getVisibleRect(const media::Size& codedSize);
+    bool sendV4L2DecoderCmd(bool start);
+
+    void setState(State newState);
+    void onError();
+
+    std::unique_ptr<VideoFramePool> mVideoFramePool;  // Pool supplying the output VideoFrames.
+
+    scoped_refptr<media::V4L2Device> mDevice;  // The underlying V4L2 device.
+    scoped_refptr<media::V4L2Queue> mInputQueue;  // Queue for input bitstream buffers.
+    scoped_refptr<media::V4L2Queue> mOutputQueue;  // Queue for decoded output buffers.
+
+    std::queue<DecodeRequest> mDecodeRequests;  // NOTE(review): <queue>/<map> not included here.
+    std::map<int32_t, DecodeCB> mPendingDecodeCbs;  // Presumably keyed by bitstream id — confirm.
+
+    GetPoolCB mGetPoolCb;  // Callback to obtain a VideoFramePool.
+    OutputCB mOutputCb;  // Called with each decoded VideoFrame.
+    DecodeCB mDrainCb;  // Pending drain completion callback, if any.
+    ErrorCB mErrorCb;  // Called to report an error.
+
+    media::Size mCodedSize;  // Current coded resolution of the stream.
+    media::Rect mVisibleRect;  // Current visible rectangle within |mCodedSize|.
+
+    std::map<size_t, std::unique_ptr<VideoFrame>> mFrameAtDevice;  // Frames queued at the device.
+
+    State mState = State::Idle;  // Current decoder state; see State above.
+
+    scoped_refptr<::base::SequencedTaskRunner> mTaskRunner;  // Task runner passed at Create().
+
+    ::base::WeakPtr<V4L2Decoder> mWeakThis;
+    ::base::WeakPtrFactory<V4L2Decoder> mWeakThisFactory{this};  // Keep last; invalidated first.
+};
+
+}  // namespace android
+
+#endif  // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODER_H
diff --git a/components/include/v4l2_codec2/components/VideoDecoder.h b/components/include/v4l2_codec2/components/VideoDecoder.h
new file mode 100644
index 0000000..9616106
--- /dev/null
+++ b/components/include/v4l2_codec2/components/VideoDecoder.h
@@ -0,0 +1,56 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_DECODER_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_DECODER_H
+
+#include <stdint.h>
+#include <memory>
+
+#include <base/callback.h>
+#include <base/files/scoped_file.h>
+
+#include <v4l2_codec2/components/VideoFrame.h>
+#include <v4l2_codec2/components/VideoFramePool.h>
+#include <v4l2_codec2/components/VideoTypes.h>
+
+namespace android {
+
+class VideoDecoder {
+public:
+    enum class DecodeStatus {
+        kOk = 0,   // Everything went as planned.
+        kAborted,  // Read aborted due to Flush() during pending read.
+        kError,    // Decoder returned decode error.
+    };
+    static const char* DecodeStatusToString(DecodeStatus status);
+
+    // One unit of encoded input, backed by a dmabuf region.
+    struct BitstreamBuffer {
+        BitstreamBuffer(const int32_t id, base::ScopedFD dmabuf_fd, const size_t offset,
+                        const size_t size)
+              : id(id), dmabuf_fd(std::move(dmabuf_fd)), offset(offset), size(size) {}
+        ~BitstreamBuffer) = default;
+
+        const int32_t id;  // Identifier assigned by the caller.
+        base::ScopedFD dmabuf_fd;  // Dmabuf holding the bitstream data; owned by this struct.
+        const size_t offset;  // Byte offset of the data within |dmabuf_fd|.
+        const size_t size;  // Byte size of the data.
+    };
+
+    using GetPoolCB = base::RepeatingCallback<void(
+            std::unique_ptr<VideoFramePool>*, const media::Size& size, HalPixelFormat pixelFormat)>;
+    using DecodeCB = base::OnceCallback<void(DecodeStatus)>;  // Completion of one decode/drain.
+    using OutputCB = base::RepeatingCallback<void(std::unique_ptr<VideoFrame>)>;  // Decoded frame.
+    using ErrorCB = base::RepeatingCallback<void()>;  // Reports an error.
+
+    virtual ~VideoDecoder();
+
+    virtual void decode(std::unique_ptr<BitstreamBuffer> buffer, DecodeCB decodeCb) = 0;
+    virtual void drain(DecodeCB drainCb) = 0;
+    virtual void flush() = 0;
+};
+
+}  // namespace android
+
+#endif  // ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_DECODER_H
diff --git a/components/include/v4l2_codec2/components/VideoFrame.h b/components/include/v4l2_codec2/components/VideoFrame.h
new file mode 100644
index 0000000..f666f4d
--- /dev/null
+++ b/components/include/v4l2_codec2/components/VideoFrame.h
@@ -0,0 +1,50 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_FRAME_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_FRAME_H
+
+#include <memory>
+#include <vector>
+
+#include <C2Buffer.h>
+#include <base/files/scoped_file.h>
+
+#include <rect.h>
+
+namespace android {
+
+// Wraps a C2GraphicBlock and exposes the essential information of the underlying buffer.
+class VideoFrame {
+public:
+    // Create the instance from C2GraphicBlock. Returns nullptr if any error occurs.
+    static std::unique_ptr<VideoFrame> Create(std::shared_ptr<C2GraphicBlock> block);
+    ~VideoFrame();
+
+    // Return the file descriptors of the corresponding buffer.
+    const std::vector<::base::ScopedFD>& getFDs() const;
+
+    // Getter and setter of the visible rectangle.
+    void setVisibleRect(const media::Rect& visibleRect);
+    const media::Rect& getVisibleRect() const;
+
+    // Getter and setter of the bitstream ID of the corresponding input bitstream.
+    void setBitstreamId(int32_t bitstreamId);
+    int32_t getBitstreamId() const;
+
+    // Get the read-only C2GraphicBlock; should be called after calling setVisibleRect().
+    C2ConstGraphicBlock getGraphicBlock();
+
+private:
+    VideoFrame(std::shared_ptr<C2GraphicBlock> block, std::vector<::base::ScopedFD> fds);
+
+    std::shared_ptr<C2GraphicBlock> mGraphicBlock;  // The wrapped graphic block.
+    std::vector<::base::ScopedFD> mFds;  // FDs of the block's buffer; returned by getFDs().
+    media::Rect mVisibleRect;  // Visible rectangle; see set/getVisibleRect().
+    int32_t mBitstreamId = -1;  // Corresponding input bitstream ID; -1 until set.
+};
+
+}  // namespace android
+
+#endif  // ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_FRAME_H
diff --git a/components/include/v4l2_codec2/components/VideoFramePool.h b/components/include/v4l2_codec2/components/VideoFramePool.h
new file mode 100644
index 0000000..64e9d71
--- /dev/null
+++ b/components/include/v4l2_codec2/components/VideoFramePool.h
@@ -0,0 +1,65 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_FRAME_POOL_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_FRAME_POOL_H
+
+#include <memory>
+#include <queue>
+
+#include <C2Buffer.h>
+#include <base/callback.h>
+#include <base/memory/weak_ptr.h>
+#include <base/sequenced_task_runner.h>
+
+#include <size.h>
+#include <v4l2_codec2/components/VideoFrame.h>
+#include <v4l2_codec2/components/VideoTypes.h>
+
+namespace android {
+
+// Fetches C2GraphicBlock from C2BlockPool and wraps it into a VideoFrame.
+// Provides an asynchronous fetch method so that the caller avoids busy-polling
+// while C2BlockPool::fetchGraphicBlock() times out.
+class VideoFramePool {
+public:
+    using GetVideoFrameCB = base::OnceCallback<void(std::unique_ptr<VideoFrame>)>;
+
+    // |blockPool| is the C2BlockPool that we fetch graphic blocks from.
+    // |size| is the resolution size of the required graphic blocks.
+    // |pixelFormat| is the pixel format of the required graphic blocks.
+    // |isSecure| indicates whether the video stream is encrypted.
+    // All public methods and the callbacks should be run on |taskRunner|.
+    VideoFramePool(std::shared_ptr<C2BlockPool> blockPool, const media::Size& size,
+                   HalPixelFormat pixelFormat, bool isSecure,
+                   scoped_refptr<::base::SequencedTaskRunner> taskRunner);
+    ~VideoFramePool();
+
+    // Get a VideoFrame instance, which will be passed via |cb|.
+    // If any error occurs, then pass nullptr.
+    void getVideoFrame(GetVideoFrameCB cb);
+
+    // Return true if any callback of getting VideoFrame instance is pending.
+    bool hasPendingRequests() const;
+
+private:
+    void tryFetchGraphicBlock();
+    void sendVideoFrame(std::unique_ptr<VideoFrame> frame);
+
+    std::shared_ptr<C2BlockPool> mBlockPool;  // Source of the graphic blocks.
+    const media::Size mSize;  // Resolution of the fetched blocks.
+    const HalPixelFormat mPixelFormat;  // Pixel format of the fetched blocks.
+    const C2MemoryUsage mMemoryUsage;  // Usage for fetching (presumably from |isSecure|).
+
+    std::queue<GetVideoFrameCB> mCbQueue;  // Pending getVideoFrame() callbacks, FIFO.
+    size_t mNumRetries = 0;  // Number of fetch retries so far.
+
+    scoped_refptr<::base::SequencedTaskRunner> mTaskRunner;  // Runner for methods and callbacks.
+    base::WeakPtr<VideoFramePool> mWeakThis;
+    base::WeakPtrFactory<VideoFramePool> mWeakThisFactory{this};  // Keep last; invalidated first.
+};
+
+}  // namespace android
+
+#endif  // ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_FRAME_POOL_H
diff --git a/components/include/v4l2_codec2/components/VideoTypes.h b/components/include/v4l2_codec2/components/VideoTypes.h
new file mode 100644
index 0000000..bcc9bc0
--- /dev/null
+++ b/components/include/v4l2_codec2/components/VideoTypes.h
@@ -0,0 +1,36 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_TYPES_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_TYPES_H
+
+#include <optional>
+#include <string>
+
+#include <android/hardware/graphics/common/1.0/types.h>
+
+namespace android {
+
+// Enumeration of supported video codecs.
+enum class VideoCodec {
+    H264,
+    VP8,
+    VP9,
+};
+const char* VideoCodecToString(VideoCodec codec);  // Returns a human-readable codec name.
+
+// Enumeration of supported pixel formats. The values should be the same as
+// ::android::hardware::graphics::common::V1_0::PixelFormat.
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+enum class HalPixelFormat : int32_t {
+    YCBCR_420_888 = static_cast<int32_t>(PixelFormat::YCBCR_420_888),
+    YV12 = static_cast<int32_t>(PixelFormat::YV12),
+    // NV12 is not defined in PixelFormat; follow the convention of using the fourcc value.
+    NV12 = 0x3231564e,  // Little-endian FourCC "NV12" ('N','V','1','2').
+};
+const char* HalPixelFormatToString(HalPixelFormat format);  // Returns a human-readable name.
+
+}  // namespace android
+
+#endif  // ANDROID_V4L2_CODEC2_COMPONENTS_VIDEO_TYPES_H