Add support library compatibility layer

This change provides a new "libneuralnetworks_cl" static library that
contains a version of the NNAPI runtime without non-NDK dependencies.
Vendors can link against this library to create support library drivers.

This change introduces new preprocessor symbols:
- NN_COMPATIBILITY_LIBRARY_BUILD
- NN_NO_AHWB
- NN_NO_BURST

The last two temporarily disable the corresponding runtime features
(AHardwareBuffer and burst execution support) and should be removed
by later changes.

If NN_COMPATIBILITY_LIBRARY_BUILD is defined, then NN_NO_AHWB and
NN_NO_BURST should also be defined, and NN_INCLUDE_CPU_IMPLEMENTATION
should be left undefined.
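
For illustration only (this sketch is not part of the change), a
translation unit built with these symbols could enforce the expected
combination with a preprocessor check:

    #if defined(NN_COMPATIBILITY_LIBRARY_BUILD)
    #if !defined(NN_NO_AHWB) || !defined(NN_NO_BURST)
    #error "NN_COMPATIBILITY_LIBRARY_BUILD requires NN_NO_AHWB and NN_NO_BURST"
    #endif
    #if defined(NN_INCLUDE_CPU_IMPLEMENTATION)
    #error "NN_COMPATIBILITY_LIBRARY_BUILD excludes NN_INCLUDE_CPU_IMPLEMENTATION"
    #endif
    #endif  // NN_COMPATIBILITY_LIBRARY_BUILD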

Bug: 160667944
Bug: 170289677
Test: NNT_static
Change-Id: I5685d0e46dc9461d62815f61dddd5fc7aedf3762
Merged-In: I5685d0e46dc9461d62815f61dddd5fc7aedf3762
(cherry picked from commit 8c4917bbc4c123778d9c4d0b9a5df79897fc4051)
diff --git a/common/Android.bp b/common/Android.bp
index 8283809..70768ff 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -253,6 +253,73 @@
 }
 
 cc_defaults {
+    name: "neuralnetworks_cl_defaults",
+    host_supported: false,
+    vendor_available: false,
+    stl: "libc++_static",
+    sdk_version: "current",
+    cflags: [
+        "-DNN_COMPATIBILITY_LIBRARY_BUILD",
+        "-DNN_NO_AHWB",
+        "-DNN_NO_BURST",
+    ],
+}
+
+cc_library_static {
+    name: "libneuralnetworks_common_cl",
+    defaults: [
+        "neuralnetworks_cl_defaults",
+        "neuralnetworks_defaults",
+        "neuralnetworks_operations",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.neuralnetworks",
+        "test_com.android.neuralnetworks",
+    ],
+    // b/109953668, disable OpenMP
+    // openmp: true,
+    export_include_dirs: [
+        "include",
+    ],
+    srcs: [
+        "BufferTracker.cpp",
+        "CpuExecutor.cpp",
+        "GraphDump.cpp",
+        "IndexedShapeWrapper.cpp",
+        "LegacyUtils.cpp",
+        "MetaModel.cpp",
+        "OperationsUtils.cpp",
+        "TokenHasher.cpp",
+    ],
+    header_libs: [
+        "libneuralnetworks_headers_ndk",
+    ],
+    static_libs: [
+        "libbase_ndk",
+        "libcrypto_static",
+    ],
+    shared_libs: [
+        "libnativewindow",
+    ],
+    cflags: [
+        "-DNAMESPACE_FOR_HASH_FUNCTIONS=farmhash",
+        "-DTF_LITE_DISABLE_X86_NEON",
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+        "-Wno-array-bounds",
+        "-Wno-extern-c-compat",
+        "-Wno-invalid-partial-specialization",
+        "-Wno-sign-compare",
+        "-Wno-unused-local-typedef",
+        "-Wno-unused-parameter",
+        "-Wno-unused-private-field",
+        "-Wno-unused-variable",
+    ],
+}
+
+cc_defaults {
     name: "neuralnetworks_utils_defaults",
     // b/146324523, NNAPI host build capability
     host_supported: false,
@@ -309,6 +376,29 @@
     ],
 }
 
+cc_library_static {
+    name: "neuralnetworks_types_cl",
+    defaults: [
+        "neuralnetworks_cl_defaults",
+        "neuralnetworks_utils_defaults",
+    ],
+    srcs: [
+        "SharedMemory.cpp",
+        "SharedMemoryAndroid.cpp",
+        "TypeUtils.cpp",
+        "Types.cpp",
+        "Validation.cpp",
+    ],
+    local_include_dirs: ["include/nnapi"],
+    export_include_dirs: ["include"],
+    static_libs: [
+        "libbase_ndk",
+    ],
+    export_static_lib_headers: [
+        "libbase_ndk",
+    ],
+}
+
 cc_defaults {
     name: "NeuralNetworksTest_common",
     defaults: ["neuralnetworks_float16"],
diff --git a/common/BufferTracker.cpp b/common/BufferTracker.cpp
index 73bfc80..a27af51 100644
--- a/common/BufferTracker.cpp
+++ b/common/BufferTracker.cpp
@@ -26,7 +26,7 @@
 #include <vector>
 
 #include "CpuExecutor.h"
-#include "Utils.h"
+#include "LegacyUtils.h"
 #include "nnapi/TypeUtils.h"
 #include "nnapi/Validation.h"
 
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index 55ce0c1..700efdd 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -19,11 +19,7 @@
 #include "CpuExecutor.h"
 
 #include <android-base/scopeguard.h>
-#include <android/hardware_buffer.h>
-#include <sys/mman.h>
-#include <vndk/hardware_buffer.h>
 
-#include <Eigen/Core>
 #include <limits>
 #include <memory>
 #include <utility>
@@ -32,6 +28,8 @@
 // b/109953668, disable OpenMP
 #ifdef NNAPI_OPENMP
 #include <omp.h>
+
+#include <Eigen/Core>
 #endif  // NNAPI_OPENMP
 
 #include <nnapi/SharedMemory.h>
@@ -387,6 +385,7 @@
     return true;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 template <typename T>
 inline bool convertToNhwcImpl(T* to, const T* from, const std::vector<uint32_t>& fromDim) {
     uint32_t spatialSize = fromDim[2] * fromDim[3];
@@ -504,6 +503,7 @@
     }
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 // Decrements the usage count for the operands listed.  Frees the memory
 // allocated for any temporary variable with a count of zero.
@@ -698,6 +698,7 @@
 }
 
 int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo* operands) {
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
     if (hasDeadlinePassed(mDeadline)) {
         return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
     }
@@ -1654,6 +1655,10 @@
 
     consumeOperationInputs(ins, operands);
     return result;
+#else
+    LOG(ERROR) << "Compatibility layer build does not support CPU execution";
+    return ANEURALNETWORKS_OP_FAILED;
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 }
 
 // Copies RunTimeOperandInfo, preserving the original lifetime and numberOfUsesLeft
diff --git a/common/ExecutionBurstServer.cpp b/common/ExecutionBurstServer.cpp
index 67d4ccb..96efb1f 100644
--- a/common/ExecutionBurstServer.cpp
+++ b/common/ExecutionBurstServer.cpp
@@ -31,6 +31,7 @@
 
 #include "HalInterfaces.h"
 #include "Tracing.h"
+#include "Utils.h"
 
 namespace android::nn {
 namespace {
diff --git a/common/GraphDump.cpp b/common/GraphDump.cpp
index 146e1c6..604bdd3 100644
--- a/common/GraphDump.cpp
+++ b/common/GraphDump.cpp
@@ -27,7 +27,7 @@
 #include <string>
 #include <utility>
 
-#include "Utils.h"
+#include "LegacyUtils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/IndexedShapeWrapper.cpp b/common/IndexedShapeWrapper.cpp
index 8101c01..675518c 100644
--- a/common/IndexedShapeWrapper.cpp
+++ b/common/IndexedShapeWrapper.cpp
@@ -20,7 +20,7 @@
 
 #include <vector>
 
-#include "Utils.h"
+#include "LegacyUtils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/LegacyUtils.cpp b/common/LegacyUtils.cpp
index 6810033..1d81e5f 100644
--- a/common/LegacyUtils.cpp
+++ b/common/LegacyUtils.cpp
@@ -206,7 +206,6 @@
     uint32_t outputCount;
     const uint32_t* outputIndexes;
     const Operand* operands;
-    Version version;
 };
 
 const char* OperationValidationContext::getOperationName() const {
diff --git a/common/LogTagTest.cpp b/common/LogTagTest.cpp
index 459afbe..7c6a4d5 100644
--- a/common/LogTagTest.cpp
+++ b/common/LogTagTest.cpp
@@ -19,7 +19,7 @@
 #include <android-base/logging.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
-#include "Utils.h"
+#include "LegacyUtils.h"
 
 namespace log_tag_test {
 
diff --git a/common/LogTagTestExtra.cpp b/common/LogTagTestExtra.cpp
index 9e9a9a2..c9b8cb7 100644
--- a/common/LogTagTestExtra.cpp
+++ b/common/LogTagTestExtra.cpp
@@ -16,7 +16,7 @@
 
 #define LOG_TAG "SecondFileTag"
 
-#include "Utils.h"
+#include "LegacyUtils.h"
 
 namespace log_tag_test {
 
diff --git a/common/MetaModel.cpp b/common/MetaModel.cpp
index f6a3685..9f3baf2 100644
--- a/common/MetaModel.cpp
+++ b/common/MetaModel.cpp
@@ -28,8 +28,7 @@
 #include <vector>
 
 #include "GraphDump.h"
-#include "Utils.h"
-#include "ValidateHal.h"
+#include "LegacyUtils.h"
 #include "nnapi/TypeUtils.h"
 #include "nnapi/Types.h"
 #include "nnapi/Validation.h"
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp
index c5a71e9..dfd2f3b 100644
--- a/common/OperationsUtils.cpp
+++ b/common/OperationsUtils.cpp
@@ -24,8 +24,8 @@
 #include <sstream>
 #include <vector>
 
+#include "LegacyUtils.h"
 #include "Operations.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/QuantUtils.h b/common/QuantUtils.h
index 3da27e9..f6bfd16 100644
--- a/common/QuantUtils.h
+++ b/common/QuantUtils.h
@@ -10,8 +10,8 @@
 #include <limits>
 #include <memory>
 
+#include "LegacyUtils.h"
 #include "OperationsUtils.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/SharedMemoryAndroid.cpp b/common/SharedMemoryAndroid.cpp
index 3e7fa24..04c3414 100644
--- a/common/SharedMemoryAndroid.cpp
+++ b/common/SharedMemoryAndroid.cpp
@@ -19,12 +19,16 @@
 #include <android-base/logging.h>
 #include <android-base/mapped_file.h>
 #include <android-base/scopeguard.h>
-#include <android/hardware_buffer.h>
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <android/hidl/allocator/1.0/IAllocator.h>
 #include <hidl/HidlSupport.h>
 #include <hidlmemory/mapping.h>
 #include <sys/mman.h>
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+#ifndef NN_NO_AHWB
+#include <android/hardware_buffer.h>
 #include <vndk/hardware_buffer.h>
+#endif  // NN_NO_AHWB
 
 #include <algorithm>
 #include <any>
@@ -43,6 +47,7 @@
 namespace android::nn {
 namespace {
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 using ::android::hardware::hidl_memory;
 using ::android::hidl::allocator::V1_0::IAllocator;
 
@@ -157,6 +162,8 @@
     };
 }
 
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 struct MmapFdMappingContext {
     int prot;
     std::any context;
@@ -179,6 +186,8 @@
     return Mapping{.pointer = data, .size = size, .context = std::move(context)};
 }
 
+#ifndef NN_NO_AHWB
+
 static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
     return (value + multiple - 1) / multiple * multiple;
 }
@@ -245,11 +254,18 @@
            << "Unable to map non-BLOB AHardwareBuffer memory";
 }
 
+#endif  // NN_NO_AHWB
+
 }  // namespace
 
 GeneralResult<Memory> createSharedMemory(size_t size) {
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
     const auto memory = NN_TRY(allocateSharedMemory(size));
     return createSharedMemoryFromHidlMemory(memory);
+#else
+    (void)size;
+    return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Unimplemented";
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 }
 
 GeneralResult<Memory> createSharedMemoryFromFd(size_t size, int prot, int fd, size_t offset) {
@@ -278,10 +294,16 @@
 }
 
 GeneralResult<Memory> createSharedMemoryFromHidlMemory(const hardware::hidl_memory& memory) {
+#ifndef NN_NO_AHWB
     return createMemory(memory);
+#else
+    (void)memory;
+    return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "hidl_memory not supported";
+#endif  // NN_NO_AHWB
 }
 
 GeneralResult<Memory> createSharedMemoryFromAHWB(const AHardwareBuffer& ahwb) {
+#ifndef NN_NO_AHWB
     AHardwareBuffer_Desc bufferDesc;
     AHardwareBuffer_describe(&ahwb, &bufferDesc);
     const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb);
@@ -300,21 +322,28 @@
             .size = 0,
             .name = "hardware_buffer",
     };
+#else
+    (void)ahwb;
+    return NN_ERROR(ErrorStatus::INVALID_ARGUMENT)
+           << "AHardwareBuffer memory not implemented for support library build yet";
+#endif  // NN_NO_AHWB
 }
 
 GeneralResult<Mapping> map(const Memory& memory) {
-    if (memory.name == "ashmem") {
-        return mapAshmem(memory);
-    }
     if (memory.name == "mmap_fd") {
         return mapMemFd(memory);
     }
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+    if (memory.name == "ashmem") {
+        return mapAshmem(memory);
+    }
     if (memory.name == "hardware_buffer_blob") {
         return mapAhwbBlobMemory(memory);
     }
     if (memory.name == "hardware_buffer") {
         return mapAhwbMemory(memory);
     }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
     return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Cannot map unknown memory " << memory.name;
 }
 
diff --git a/common/SharedMemoryHost.cpp b/common/SharedMemoryHost.cpp
index 4c73acb..f90fe4c 100644
--- a/common/SharedMemoryHost.cpp
+++ b/common/SharedMemoryHost.cpp
@@ -19,7 +19,6 @@
 #include <android-base/logging.h>
 #include <android-base/mapped_file.h>
 #include <cutils/ashmem.h>
-#include <cutils/native_handle.h>
 #include <sys/mman.h>
 
 #include <limits>
diff --git a/common/UtilsTest.cpp b/common/UtilsTest.cpp
index 1c97ccc..fc9f066 100644
--- a/common/UtilsTest.cpp
+++ b/common/UtilsTest.cpp
@@ -24,6 +24,7 @@
 #include "MemoryUtils.h"
 #include "OperationsUtils.cpp"
 #include "QuantUtils.h"
+#include "Utils.h"
 #include "ValidateHal.h"
 #include "nnapi/TypeUtils.h"
 #include "nnapi/Types.h"
diff --git a/common/include/ExecutionBurstController.h b/common/include/ExecutionBurstController.h
index 2a4de7d..544fb36 100644
--- a/common/include/ExecutionBurstController.h
+++ b/common/include/ExecutionBurstController.h
@@ -16,6 +16,13 @@
 
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_EXECUTION_BURST_CONTROLLER_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_EXECUTION_BURST_CONTROLLER_H
+#ifdef NN_NO_BURST
+
+namespace android::nn {
+class ExecutionBurstController;
+}  // namespace android::nn
+
+#else
 
 #include <android-base/macros.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
@@ -342,4 +349,5 @@
 
 }  // namespace android::nn
 
+#endif  // NN_NO_BURST
 #endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_EXECUTION_BURST_CONTROLLER_H
diff --git a/common/include/LegacyHalUtils.h b/common/include/LegacyHalUtils.h
index 72ead2c..eef7ce0 100644
--- a/common/include/LegacyHalUtils.h
+++ b/common/include/LegacyHalUtils.h
@@ -178,20 +178,6 @@
 bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
                                          const hardware::hidl_vec<hardware::hidl_memory>& pools);
 
-struct ApiVersion {
-    Version canonical;
-    int64_t android;
-};
-
-constexpr auto kHalVersionV1_0ToApi =
-        ApiVersion{.canonical = Version::ANDROID_OC_MR1, .android = __ANDROID_API_O_MR1__};
-constexpr auto kHalVersionV1_1ToApi =
-        ApiVersion{.canonical = Version::ANDROID_P, .android = __ANDROID_API_P__};
-constexpr auto kHalVersionV1_2ToApi =
-        ApiVersion{.canonical = Version::ANDROID_Q, .android = __ANDROID_API_Q__};
-constexpr auto kHalVersionV1_3ToApi =
-        ApiVersion{.canonical = Version::ANDROID_R, .android = __ANDROID_API_R__};
-
 // Versioning
 
 bool compliantWithV1_0(const V1_0::Capabilities& capabilities);
diff --git a/common/include/LegacyUtils.h b/common/include/LegacyUtils.h
index 5a6e3bf..21e08c1 100644
--- a/common/include/LegacyUtils.h
+++ b/common/include/LegacyUtils.h
@@ -313,6 +313,20 @@
 uint32_t getProp(const char* str, uint32_t defaultValue = 0);
 #endif  // NN_DEBUGGABLE
 
+struct ApiVersion {
+    Version canonical;
+    int64_t android;
+};
+
+constexpr auto kHalVersionV1_0ToApi =
+        ApiVersion{.canonical = Version::ANDROID_OC_MR1, .android = __ANDROID_API_O_MR1__};
+constexpr auto kHalVersionV1_1ToApi =
+        ApiVersion{.canonical = Version::ANDROID_P, .android = __ANDROID_API_P__};
+constexpr auto kHalVersionV1_2ToApi =
+        ApiVersion{.canonical = Version::ANDROID_Q, .android = __ANDROID_API_Q__};
+constexpr auto kHalVersionV1_3ToApi =
+        ApiVersion{.canonical = Version::ANDROID_R, .android = __ANDROID_API_R__};
+
 }  // namespace nn
 }  // namespace android
 
diff --git a/common/include/OperationResolver.h b/common/include/OperationResolver.h
index 155341a..04719b9 100644
--- a/common/include/OperationResolver.h
+++ b/common/include/OperationResolver.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATION_RESOLVER_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATION_RESOLVER_H
 
-#include <android-base/macros.h>
-
 #include <utility>
 
 #include "OperationsUtils.h"
diff --git a/common/include/Operations.h b/common/include/Operations.h
index e4df10f..9c02a69 100644
--- a/common/include/Operations.h
+++ b/common/include/Operations.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_H
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include "operations/BidirectionalSequenceLSTM.h"
 #include "operations/Cast.h"
 #include "operations/EmbeddingLookup.h"
@@ -31,12 +32,15 @@
 #include "operations/RNN.h"
 #include "operations/SVDF.h"
 #include "operations/Tile.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 #include <stddef.h>
 
 #include <cstdint>
 #include <vector>
 
+#include "ActivationFunctor.h"
+
 namespace android {
 namespace nn {
 
diff --git a/common/include/Tracing.h b/common/include/Tracing.h
index e461b2b..1953fe1 100644
--- a/common/include/Tracing.h
+++ b/common/include/Tracing.h
@@ -17,8 +17,10 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_TRACING_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_TRACING_H
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #define ATRACE_TAG ATRACE_TAG_NNAPI
 #include <utils/Trace.h>
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 // Neural Networks API (NNAPI) systracing
 //
@@ -149,6 +151,8 @@
 #define NNTRACE_LAYER_OTHER "LO"
 #define NNTRACE_LAYER_UTILITY "LU"  // Code used from multiple layers
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+
 // Implementation
 //
 // Almost same as ATRACE_NAME, but enforcing explicit distinction between
@@ -163,6 +167,13 @@
     android::ScopedTrace PASTE(___tracer, __LINE__)(ATRACE_TAG, name); \
     (void)___tracer_1  // ensure switch is only used after a basic trace
 
+#else
+
+#define NNTRACE_NAME_1(name)       // empty
+#define NNTRACE_NAME_SWITCH(name)  // empty
+
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 // Disallow use of raw ATRACE macros
 #undef ATRACE_NAME
 #undef ATRACE_CALL
diff --git a/common/operations/Activation.cpp b/common/operations/Activation.cpp
index 651cd02..c4c4089 100644
--- a/common/operations/Activation.cpp
+++ b/common/operations/Activation.cpp
@@ -16,21 +16,24 @@
 
 #define LOG_TAG "Operations"
 
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "ActivationFunctor.h"
+#include "OperationResolver.h"
+#include "OperationsUtils.h"
+#include "Tracing.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
 #include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
 #include <tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h>
 #include <tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h>
 #include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
 
-#include <algorithm>
-#include <limits>
-#include <vector>
-
-#include "ActivationFunctor.h"
 #include "CpuOperationUtils.h"
-#include "OperationResolver.h"
-#include "OperationsUtils.h"
-#include "Tracing.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -43,6 +46,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -352,6 +356,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(OperationType opType, const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -399,6 +404,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(OperationType opType, IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     if (opType != OperationType::HARD_SWISH) {
@@ -616,6 +622,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation TANH";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace activation
 
diff --git a/common/operations/BidirectionalSequenceLSTM.cpp b/common/operations/BidirectionalSequenceLSTM.cpp
index 6cf095b..6be67b0 100644
--- a/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/common/operations/BidirectionalSequenceLSTM.cpp
@@ -18,6 +18,8 @@
 
 #include "BidirectionalSequenceLSTM.h"
 
+#include <tensorflow/lite/kernels/internal/tensor_utils.h>
+
 #include <algorithm>
 #include <vector>
 
diff --git a/common/operations/BidirectionalSequenceLSTM.h b/common/operations/BidirectionalSequenceLSTM.h
index 7077d3b..d697867 100644
--- a/common/operations/BidirectionalSequenceLSTM.h
+++ b/common/operations/BidirectionalSequenceLSTM.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_BIDIRECTIONAL_SEQUENCE_LSTM_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_BIDIRECTIONAL_SEQUENCE_LSTM_H
 
-#include <tensorflow/lite/kernels/internal/tensor_utils.h>
-
 #include <algorithm>
 #include <cmath>
 #include <vector>
diff --git a/common/operations/BidirectionalSequenceRNN.cpp b/common/operations/BidirectionalSequenceRNN.cpp
index 5a020d1..3a63bdb 100644
--- a/common/operations/BidirectionalSequenceRNN.cpp
+++ b/common/operations/BidirectionalSequenceRNN.cpp
@@ -58,6 +58,7 @@
 constexpr uint32_t kFwOutputHiddenStateTensor = 2;
 constexpr uint32_t kBwOutputHiddenStateTensor = 3;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -312,6 +313,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -341,6 +343,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     const bool mergeOutputs = context->getInputValue<bool>(kMergeOutputsParam);
     const int32_t numOutputs = context->getNumOutputs();
@@ -468,6 +471,7 @@
     }
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace bidirectional_sequence_rnn
 
diff --git a/common/operations/Broadcast.cpp b/common/operations/Broadcast.cpp
index a2d5b8a..6854fd0 100644
--- a/common/operations/Broadcast.cpp
+++ b/common/operations/Broadcast.cpp
@@ -18,6 +18,16 @@
 
 #define LOG_TAG "Operations"
 
+#include <algorithm>
+#include <vector>
+
+#include "IndexedShapeWrapper.h"
+#include "OperationResolver.h"
+#include "Tracing.h"
+#include "nnapi/Types.h"
+#include "nnapi/Validation.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/integer_ops/add.h>
 #include <tensorflow/lite/kernels/internal/optimized/integer_ops/mul.h>
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -25,15 +35,8 @@
 #include <tensorflow/lite/kernels/internal/reference/integer_ops/mul.h>
 #include <tensorflow/lite/kernels/internal/types.h>
 
-#include <algorithm>
-#include <vector>
-
 #include "CpuOperationUtils.h"
-#include "IndexedShapeWrapper.h"
-#include "OperationResolver.h"
-#include "Tracing.h"
-#include "nnapi/Types.h"
-#include "nnapi/Validation.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -48,6 +51,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 #define ANDROID_NN_MACRO_DISPATCH(macro)                                \
@@ -433,6 +437,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(OperationType opType, const IOperationValidationContext* context) {
     auto minSupportedVersion = (opType == OperationType::DIV || opType == OperationType::SUB)
@@ -476,6 +481,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input1 = context->getInputShape(kInputTensor1);
     Shape input2 = context->getInputShape(kInputTensor2);
@@ -677,6 +683,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation DIV";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace broadcast
 
diff --git a/common/operations/Concatenation.cpp b/common/operations/Concatenation.cpp
index 8c08fd0..6047927 100644
--- a/common/operations/Concatenation.cpp
+++ b/common/operations/Concatenation.cpp
@@ -16,20 +16,23 @@
 
 #define LOG_TAG "Operations"
 
+#include <algorithm>
+#include <iterator>
+#include <vector>
+
+#include "OperationResolver.h"
+#include "OperationsUtils.h"
+#include "Tracing.h"
+#include "nnapi/Validation.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
 #include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
 #include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
 #include <tensorflow/lite/kernels/internal/types.h>
 
-#include <algorithm>
-#include <iterator>
-#include <vector>
-
 #include "CpuOperationUtils.h"
-#include "OperationResolver.h"
-#include "OperationsUtils.h"
-#include "Tracing.h"
-#include "nnapi/Validation.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -40,6 +43,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -135,6 +139,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     uint32_t inputCount = context->getNumInputs();
@@ -173,6 +178,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     uint32_t numInputs = context->getNumInputs();
     NN_RET_CHECK_GE(numInputs, 2);
@@ -220,6 +226,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace concatenation
 
diff --git a/common/operations/Conv2D.cpp b/common/operations/Conv2D.cpp
index 6d98982..ce3c4db 100644
--- a/common/operations/Conv2D.cpp
+++ b/common/operations/Conv2D.cpp
@@ -16,21 +16,24 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/reference/integer_ops/conv.h>
-#include <tensorflow/lite/kernels/internal/types.h>
-
 #include <algorithm>
 #include <iterator>
 #include <memory>
 #include <vector>
 
-#include "CpuOperationUtils.h"
+#include "LegacyUtils.h"
 #include "OperationResolver.h"
 #include "Operations.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
-#include "Utils.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/reference/integer_ops/conv.h>
+#include <tensorflow/lite/kernels/internal/types.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -126,6 +129,7 @@
     }
 };
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #define ANDROID_NN_CONV_PARAMETERS(Type)                                          \
     uint32_t height = getSizeOfDimension(inputShape, 1);                          \
     uint32_t width = getSizeOfDimension(inputShape, 2);                           \
@@ -523,6 +527,7 @@
 }
 
 #undef ANDROID_NN_CONV_PARAMETERS
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace
 
@@ -627,6 +632,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape filter = context->getInputShape(kFilterTensor);
@@ -791,6 +797,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace conv_2d
 
diff --git a/common/operations/DepthwiseConv2D.cpp b/common/operations/DepthwiseConv2D.cpp
index 64bd7dd..815a860 100644
--- a/common/operations/DepthwiseConv2D.cpp
+++ b/common/operations/DepthwiseConv2D.cpp
@@ -16,17 +16,20 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h>
-#include <tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h>
-
 #include <algorithm>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Operations.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h>
+#include <tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace depthwise_conv_2d {
@@ -40,6 +43,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 struct DepthwiseConv2dParam {
@@ -412,6 +416,7 @@
 #undef ANDROID_NN_DEPTHWISE_CONV_PARAMETERS
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     const uint32_t numInputs = context->getNumInputs();
@@ -510,6 +515,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape filter = context->getInputShape(kFilterTensor);
@@ -674,6 +680,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace depthwise_conv_2d
 
diff --git a/common/operations/ExpandDims.cpp b/common/operations/ExpandDims.cpp
index 2f546c9..435b3c7 100644
--- a/common/operations/ExpandDims.cpp
+++ b/common/operations/ExpandDims.cpp
@@ -18,7 +18,7 @@
 
 #include "ExpandDims.h"
 
-#include "Utils.h"
+#include "LegacyUtils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/operations/FullyConnected.cpp b/common/operations/FullyConnected.cpp
index ab50d31..176425c 100644
--- a/common/operations/FullyConnected.cpp
+++ b/common/operations/FullyConnected.cpp
@@ -14,19 +14,22 @@
  * limitations under the License.
  */
 
-#include "tensorflow/lite/kernels/internal/types.h"
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h>
-#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
-
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h>
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+#include <tensorflow/lite/kernels/internal/types.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace fully_connected {
@@ -44,6 +47,7 @@
 
 namespace {
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 // executionMutex is used to protect concurrent access of non-threadsafe resources
 // like gemmlowp::GemmContext.
 // std::mutex is safe for pthreads on Android.
@@ -176,6 +180,7 @@
 
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 bool validateShapes(const Shape& input, const Shape& weights, const Shape& bias,
                     Shape* output = nullptr) {
@@ -286,6 +291,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape weights = context->getInputShape(kWeightsTensor);
@@ -343,6 +349,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace fully_connected
 
diff --git a/common/operations/GenerateProposals.cpp b/common/operations/GenerateProposals.cpp
index 95e3676..2f30a72 100644
--- a/common/operations/GenerateProposals.cpp
+++ b/common/operations/GenerateProposals.cpp
@@ -23,15 +23,19 @@
 #include <utility>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace bbox_ops {
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 struct BoxEncodingCorner {
@@ -183,6 +187,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace axis_aligned_bbox_transform {
 
@@ -221,6 +226,7 @@
     return Version::ANDROID_Q;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape roiShape = context->getInputShape(kRoiTensor);
     Shape bboxDeltasShape = context->getInputShape(kDeltaTensor);
@@ -322,6 +328,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace axis_aligned_bbox_transform
 
@@ -346,6 +353,7 @@
 constexpr uint32_t kOutputClassTensor = 2;
 constexpr uint32_t kOutputBatchesTensor = 3;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 // TODO(xusongw): Reduce code duplication with hard/soft nms path.
@@ -700,6 +708,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -746,6 +755,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape scoreShape = context->getInputShape(kScoreTensor);
     Shape roiShape = context->getInputShape(kRoiTensor);
@@ -898,6 +908,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace box_with_nms_limit
 
@@ -923,6 +934,7 @@
 constexpr uint32_t kOutputRoiTensor = 1;
 constexpr uint32_t kOutputBatchesTensor = 2;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 void filterBoxes(const float* roiBase, const float* imageInfoBase, float minSize,
@@ -1210,6 +1222,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -1272,6 +1285,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     bool useNchw = context->getInputValue<bool>(kLayoutScalar);
     Shape scoreShape = context->getInputShape(kScoreTensor);
@@ -1401,6 +1415,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace generate_proposals
 
@@ -1430,6 +1445,7 @@
 constexpr uint32_t kOutputClassTensor = 2;
 constexpr uint32_t kOutputDetectionTensor = 3;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 bool detectionPostprocessFloat32(
@@ -1566,6 +1582,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -1598,6 +1615,7 @@
     return Version::ANDROID_Q;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape scoreShape = context->getInputShape(kScoreTensor);
     Shape deltasShape = context->getInputShape(kDeltaTensor);
@@ -1734,6 +1752,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace detection_postprocess
 
diff --git a/common/operations/HeatmapMaxKeypoint.cpp b/common/operations/HeatmapMaxKeypoint.cpp
index 63fc597..b902e08 100644
--- a/common/operations/HeatmapMaxKeypoint.cpp
+++ b/common/operations/HeatmapMaxKeypoint.cpp
@@ -21,11 +21,14 @@
 #include <cmath>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace heatmap_max_keypoint {
@@ -41,6 +44,7 @@
 constexpr uint32_t kOutputScoreTensor = 0;
 constexpr uint32_t kOutputKeypointTensor = 1;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 // This function uses Taylor expansion up to the quatratic term to approximate bicubic
@@ -223,6 +227,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -252,6 +257,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     bool layout = context->getInputValue<bool>(kLayoutScalar);
     Shape heatmapShape = context->getInputShape(kHeatmapTensor);
@@ -355,6 +361,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace heatmap_max_keypoint
 
diff --git a/common/operations/InstanceNormalization.cpp b/common/operations/InstanceNormalization.cpp
index 1a0e488..0da0e05 100644
--- a/common/operations/InstanceNormalization.cpp
+++ b/common/operations/InstanceNormalization.cpp
@@ -19,10 +19,13 @@
 #include <cmath>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace instance_normalization {
@@ -39,6 +42,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -98,6 +102,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -118,6 +123,7 @@
     return Version::ANDROID_Q;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
@@ -148,6 +154,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace instance_normalization
 
diff --git a/common/operations/L2Normalization.cpp b/common/operations/L2Normalization.cpp
index 05682ea..7d9adf4 100644
--- a/common/operations/L2Normalization.cpp
+++ b/common/operations/L2Normalization.cpp
@@ -16,16 +16,19 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h>
-
 #include <algorithm>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace l2_norm {
@@ -39,6 +42,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline bool l2normFloat32Impl(const float* inputData, const Shape& inputShape, int32_t axis,
@@ -195,6 +199,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK(context->getNumInputs() == kNumInputs ||
@@ -228,6 +233,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     const Shape& input = context->getInputShape(kInputTensor);
     int32_t numDimensions = getNumberOfDimensions(input);
@@ -283,6 +289,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace l2_norm
 
diff --git a/common/operations/LSHProjection.cpp b/common/operations/LSHProjection.cpp
index 14d7a79..c3f051d 100644
--- a/common/operations/LSHProjection.cpp
+++ b/common/operations/LSHProjection.cpp
@@ -23,8 +23,8 @@
 #include <memory>
 
 #include "CpuExecutor.h"
+#include "LegacyUtils.h"
 #include "Tracing.h"
-#include "Utils.h"
 #include "nnapi/Types.h"
 
 namespace android {
diff --git a/common/operations/LSTM.cpp b/common/operations/LSTM.cpp
index e64d0c4..5c16ccf 100644
--- a/common/operations/LSTM.cpp
+++ b/common/operations/LSTM.cpp
@@ -18,13 +18,15 @@
 
 #include "LSTM.h"
 
+#include <tensorflow/lite/kernels/internal/tensor_utils.h>
+
 #include <vector>
 
 #include "CpuExecutor.h"
 #include "CpuOperationUtils.h"
+#include "LegacyUtils.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
-#include "Utils.h"
 #include "nnapi/Types.h"
 
 namespace android {
diff --git a/common/operations/LocalResponseNormalization.cpp b/common/operations/LocalResponseNormalization.cpp
index ed16dec..6ad801d 100644
--- a/common/operations/LocalResponseNormalization.cpp
+++ b/common/operations/LocalResponseNormalization.cpp
@@ -16,15 +16,18 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
-
 #include <algorithm>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace local_response_norm {
@@ -42,6 +45,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline bool localResponseNormFloat32Impl(const float* inputData, const Shape& inputShape,
@@ -129,6 +133,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK(context->getNumInputs() == kNumInputs ||
@@ -173,6 +178,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     const Shape& input = context->getInputShape(kInputTensor);
     int32_t numDimensions = getNumberOfDimensions(input);
@@ -195,6 +201,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace local_response_norm
 
diff --git a/common/operations/Multinomial.cpp b/common/operations/Multinomial.cpp
index 6521bc7..4b12f8f 100644
--- a/common/operations/Multinomial.cpp
+++ b/common/operations/Multinomial.cpp
@@ -20,15 +20,21 @@
 
 #include <algorithm>
 #include <limits>
-#include <unsupported/Eigen/CXX11/Tensor>
 #include <vector>
 
 #include "CpuExecutor.h"
-#include "CpuOperationUtils.h"
 #include "Tracing.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/tensor_utils.h>
+
+#include <unsupported/Eigen/CXX11/Tensor>
+
+#include "CpuOperationUtils.h"
 #include "guarded_philox_random.h"
 #include "philox_random.h"
 #include "simple_philox.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
diff --git a/common/operations/Multinomial.h b/common/operations/Multinomial.h
index bdfe587..ba0b114 100644
--- a/common/operations/Multinomial.h
+++ b/common/operations/Multinomial.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_MULTINOMIAL_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_MULTINOMIAL_H
 
-#include <tensorflow/lite/kernels/internal/tensor_utils.h>
-
 #include <algorithm>
 #include <cmath>
 #include <vector>
diff --git a/common/operations/PRelu.cpp b/common/operations/PRelu.cpp
index 88e38fc..60d7210 100644
--- a/common/operations/PRelu.cpp
+++ b/common/operations/PRelu.cpp
@@ -16,7 +16,9 @@
 
 #define LOG_TAG "Operations"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 #include <algorithm>
 #include <vector>
@@ -39,6 +41,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 template <typename T>
 inline bool eval(const std::function<T(const T&, const T&)>& func, const T* aData,
                  const Shape& aShape, const T* bData, const Shape& bShape, T* outputData,
@@ -94,6 +97,7 @@
             },
             aData, aShape, bData, bShape, outputData, outputShape);
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -113,6 +117,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape alpha = context->getInputShape(kAlphaTensor);
@@ -166,6 +171,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace prelu
 
diff --git a/common/operations/Pooling.cpp b/common/operations/Pooling.cpp
index 6cd2864..20a5c76 100644
--- a/common/operations/Pooling.cpp
+++ b/common/operations/Pooling.cpp
@@ -16,16 +16,19 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h>
-
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 #include "nnapi/Validation.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 
@@ -36,6 +39,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 struct PoolingParam {
@@ -287,6 +291,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(OperationType opType, const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
@@ -352,6 +357,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
@@ -431,6 +437,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation MAX_POOL_2D";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 #undef POOLING_DISPATCH_INPUT_TYPE
 
diff --git a/common/operations/QLSTM.cpp b/common/operations/QLSTM.cpp
index e8c4f90..4dad7f7 100644
--- a/common/operations/QLSTM.cpp
+++ b/common/operations/QLSTM.cpp
@@ -20,7 +20,9 @@
 
 #include "CpuExecutor.h"
 #include "OperationsUtils.h"
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include "QuantUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -360,6 +362,7 @@
            context->setOutputShape(kOutputTensor, outputShape);
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool execute(IOperationExecutionContext* context) {
     // Gets the inputs.
     const Shape inputShape = context->getInputShape(kInputTensor);
@@ -792,6 +795,7 @@
 
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace qlstm
 
diff --git a/common/operations/Rank.cpp b/common/operations/Rank.cpp
index f636341..23f5c1e 100644
--- a/common/operations/Rank.cpp
+++ b/common/operations/Rank.cpp
@@ -16,9 +16,9 @@
 
 #define LOG_TAG "Operations"
 
+#include "LegacyUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/operations/Reduce.cpp b/common/operations/Reduce.cpp
index 9eb1956..a73b652 100644
--- a/common/operations/Reduce.cpp
+++ b/common/operations/Reduce.cpp
@@ -16,7 +16,9 @@
 
 #define LOG_TAG "Operations"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 #include <algorithm>
 #include <limits>
@@ -43,6 +45,7 @@
 constexpr _Float16 kFloat16Max = 65504;
 constexpr _Float16 kFloat16Lowest = -kFloat16Max;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -65,6 +68,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validateProdSum(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -122,6 +126,7 @@
     return Version::ANDROID_Q;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape inputShape = context->getInputShape(kInputTensor);
     const uint32_t inputRank = getNumberOfDimensions(inputShape);
@@ -247,6 +252,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_ALL";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace reduce
 
diff --git a/common/operations/Reshape.cpp b/common/operations/Reshape.cpp
index 76effb8..35dee5f 100644
--- a/common/operations/Reshape.cpp
+++ b/common/operations/Reshape.cpp
@@ -24,9 +24,9 @@
 #include <vector>
 
 #include "CpuOperationUtils.h"
+#include "LegacyUtils.h"
 #include "Operations.h"
 #include "Tracing.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/operations/ResizeImageOps.cpp b/common/operations/ResizeImageOps.cpp
index 733bedb..ea3a7dd 100644
--- a/common/operations/ResizeImageOps.cpp
+++ b/common/operations/ResizeImageOps.cpp
@@ -16,17 +16,20 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
-
 #include <algorithm>
 #include <functional>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 #include "nnapi/Validation.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 
@@ -45,6 +48,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline float scaleHalfPixel(const int x, const float scale) {
@@ -168,6 +172,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(OperationType opType, const IOperationValidationContext* context) {
     const auto numInputs = context->getNumInputs();
@@ -221,6 +226,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(OperationType opType, IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
@@ -307,6 +313,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace resize_image
 
diff --git a/common/operations/RoiAlign.cpp b/common/operations/RoiAlign.cpp
index 3ca64f5..a6bba60 100644
--- a/common/operations/RoiAlign.cpp
+++ b/common/operations/RoiAlign.cpp
@@ -16,18 +16,21 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/common.h>
-
 #include <algorithm>
 #include <cfloat>
 #include <cmath>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/common.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace roi_align {
@@ -49,6 +52,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T_Input, typename T_Roi>
@@ -336,6 +340,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -378,6 +383,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     bool useNchw = context->getInputValue<bool>(kLayoutScalar);
     Shape input = context->getInputShape(kInputTensor);
@@ -500,6 +506,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace roi_align
 
diff --git a/common/operations/RoiPooling.cpp b/common/operations/RoiPooling.cpp
index 26e2213..15fc16c 100644
--- a/common/operations/RoiPooling.cpp
+++ b/common/operations/RoiPooling.cpp
@@ -21,11 +21,14 @@
 #include <cmath>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace roi_pooling {
@@ -45,6 +48,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T_Input, typename T_Roi>
@@ -183,6 +187,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -221,6 +226,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     bool useNchw = context->getInputValue<bool>(kLayoutScalar);
     Shape input = context->getInputShape(kInputTensor);
@@ -322,6 +328,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace roi_pooling
 
diff --git a/common/operations/Slice.cpp b/common/operations/Slice.cpp
index db47419..9b797d0 100644
--- a/common/operations/Slice.cpp
+++ b/common/operations/Slice.cpp
@@ -18,10 +18,13 @@
 
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "IndexedShapeWrapper.h"
 #include "OperationResolver.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace slice {
@@ -36,6 +39,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -77,6 +81,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -101,6 +106,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     const Shape& inputShape = context->getInputShape(kInputTensor);
     const int32_t n_dims = getNumberOfDimensions(inputShape);
@@ -186,6 +192,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace slice
 
diff --git a/common/operations/Softmax.cpp b/common/operations/Softmax.cpp
index 3e65d85..56de331 100644
--- a/common/operations/Softmax.cpp
+++ b/common/operations/Softmax.cpp
@@ -16,19 +16,22 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
-
 #include <algorithm>
 #include <cfloat>
 #include <limits>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 #include "nnapi/Validation.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 
@@ -44,6 +47,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline bool softmaxSlowFloat32(const float* inputData, const Shape& inputShape, const float beta,
@@ -226,6 +230,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK(context->getNumInputs() == kNumInputs ||
@@ -263,6 +268,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     float beta = (input.type == OperandType::TENSOR_FLOAT16)
@@ -310,6 +316,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace softmax
 
diff --git a/common/operations/Squeeze.cpp b/common/operations/Squeeze.cpp
index 2fe8eb8..c652005 100644
--- a/common/operations/Squeeze.cpp
+++ b/common/operations/Squeeze.cpp
@@ -66,6 +66,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     // Only the squeeze dims tensor can be omitted.
     NN_RET_CHECK(!context->isOmittedInput(kInputTensor));
@@ -137,6 +138,8 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for SQUEEZE op.";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 }  // namespace squeeze
 
 NN_REGISTER_OPERATION(SQUEEZE, "SQUEEZE", squeeze::validate, squeeze::prepare, squeeze::execute,
diff --git a/common/operations/StridedSlice.cpp b/common/operations/StridedSlice.cpp
index fd66ca7..e13c9b0 100644
--- a/common/operations/StridedSlice.cpp
+++ b/common/operations/StridedSlice.cpp
@@ -18,15 +18,18 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
-
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Operations.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace strided_slice {
@@ -43,6 +46,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -95,6 +99,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -132,6 +137,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     // StridedSlice op only supports 1D-4D input arrays.
     const Shape& inputShape = context->getInputShape(kInputTensor);
@@ -213,6 +219,8 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for STRIDED_SLICE op.";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 }  // namespace strided_slice
 
 NN_REGISTER_OPERATION(STRIDED_SLICE, "STRIDED_SLICE", strided_slice::validate,
diff --git a/common/operations/Transpose.cpp b/common/operations/Transpose.cpp
index 0e61575..0f2ae2a 100644
--- a/common/operations/Transpose.cpp
+++ b/common/operations/Transpose.cpp
@@ -16,14 +16,17 @@
 
 #define LOG_TAG "Operations"
 
+#include <vector>
+
+#include "OperationResolver.h"
+#include "Tracing.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
 #include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
 
-#include <vector>
-
 #include "CpuOperationUtils.h"
-#include "OperationResolver.h"
-#include "Tracing.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -38,6 +41,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -68,6 +72,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -93,6 +98,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     // Only the permutation tensor can be omitted.
     NN_RET_CHECK(!context->isOmittedInput(kInputTensor));
@@ -168,6 +174,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace transpose
 
diff --git a/common/operations/TransposeConv2D.cpp b/common/operations/TransposeConv2D.cpp
index 002df27..0561e02 100644
--- a/common/operations/TransposeConv2D.cpp
+++ b/common/operations/TransposeConv2D.cpp
@@ -16,18 +16,21 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/common.h>
-
 #include <algorithm>
 #include <cfloat>
 #include <cmath>
 #include <memory>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/common.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace transpose_conv_2d {
@@ -104,6 +107,7 @@
     }
 };
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #define ANDROID_NN_TRANSPOSE_CONV_PARAMETERS                                    \
     uint32_t numBatches = getSizeOfDimension(inputShape, 0);                    \
     uint32_t inputHeight = getSizeOfDimension(inputShape, 1);                   \
@@ -430,6 +434,7 @@
 }
 
 #undef ANDROID_NN_TRANSPOSE_CONV_PARAMETERS
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace
 
@@ -479,6 +484,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape filter = context->getInputShape(kFilterTensor);
@@ -620,6 +626,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace transpose_conv_2d
 
diff --git a/common/operations/UnidirectionalSequenceLSTM.cpp b/common/operations/UnidirectionalSequenceLSTM.cpp
index dc734e8..7d34023 100644
--- a/common/operations/UnidirectionalSequenceLSTM.cpp
+++ b/common/operations/UnidirectionalSequenceLSTM.cpp
@@ -19,7 +19,9 @@
 #include <vector>
 
 #include "IndexedShapeWrapper.h"
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include "LSTM.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 
@@ -85,6 +87,7 @@
 constexpr uint32_t kOutputStateOutTensor = 1;
 constexpr uint32_t kCellStateOutTensor = 2;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline bool hasTensor(IOperationExecutionContext* context, const uint32_t tensor) {
@@ -111,6 +114,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -166,6 +170,7 @@
     return minVersionSupported;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     // Check that none of the required inputs are omitted
     const std::vector<int> requiredInputs = {
@@ -512,6 +517,7 @@
     }
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace unidirectional_sequence_lstm
 
diff --git a/common/operations/UnidirectionalSequenceRNN.cpp b/common/operations/UnidirectionalSequenceRNN.cpp
index eaf60ed..a9ad503 100644
--- a/common/operations/UnidirectionalSequenceRNN.cpp
+++ b/common/operations/UnidirectionalSequenceRNN.cpp
@@ -42,6 +42,7 @@
 constexpr uint32_t kOutputTensor = 0;
 constexpr uint32_t kStateOutputTensor = 1;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -125,6 +126,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -147,6 +149,7 @@
     return minVersionSupported;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape weights = context->getInputShape(kWeightsTensor);
@@ -202,6 +205,7 @@
     }
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace unidirectional_sequence_rnn
 
diff --git a/driver/sample/SampleDriverFloatFast.cpp b/driver/sample/SampleDriverFloatFast.cpp
index 0ee4670..eae529c 100644
--- a/driver/sample/SampleDriverFloatFast.cpp
+++ b/driver/sample/SampleDriverFloatFast.cpp
@@ -25,7 +25,6 @@
 #include "HalInterfaces.h"
 #include "SampleDriverPartial.h"
 #include "Utils.h"
-#include "ValidateHal.h"
 
 namespace android {
 namespace nn {
diff --git a/driver/sample/SampleDriverFloatSlow.cpp b/driver/sample/SampleDriverFloatSlow.cpp
index 009cd5a..8e70dae 100644
--- a/driver/sample/SampleDriverFloatSlow.cpp
+++ b/driver/sample/SampleDriverFloatSlow.cpp
@@ -25,7 +25,6 @@
 #include "HalInterfaces.h"
 #include "SampleDriverPartial.h"
 #include "Utils.h"
-#include "ValidateHal.h"
 
 namespace android {
 namespace nn {
diff --git a/driver/sample/SampleDriverMinimal.cpp b/driver/sample/SampleDriverMinimal.cpp
index eef9937..b0863fa 100644
--- a/driver/sample/SampleDriverMinimal.cpp
+++ b/driver/sample/SampleDriverMinimal.cpp
@@ -25,7 +25,6 @@
 #include "NeuralNetworksOEM.h"
 #include "SampleDriverPartial.h"
 #include "Utils.h"
-#include "ValidateHal.h"
 
 namespace android {
 namespace nn {
diff --git a/driver/sample/SampleDriverPartial.h b/driver/sample/SampleDriverPartial.h
index 661227a..19ac4fd 100644
--- a/driver/sample/SampleDriverPartial.h
+++ b/driver/sample/SampleDriverPartial.h
@@ -25,7 +25,6 @@
 #include "HalInterfaces.h"
 #include "SampleDriver.h"
 #include "Utils.h"
-#include "ValidateHal.h"
 
 namespace android {
 namespace nn {
diff --git a/driver/sample/SampleDriverQuant.cpp b/driver/sample/SampleDriverQuant.cpp
index f73a6fd..0883eca 100644
--- a/driver/sample/SampleDriverQuant.cpp
+++ b/driver/sample/SampleDriverQuant.cpp
@@ -25,7 +25,6 @@
 #include "HalInterfaces.h"
 #include "SampleDriverPartial.h"
 #include "Utils.h"
-#include "ValidateHal.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 5a6ad5f..c90b13e 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -151,6 +151,46 @@
     apex_available: ["//apex_available:platform"],
 }
 
+cc_library_static {
+    name: "libneuralnetworks_cl",
+    defaults: [
+        "neuralnetworks_cl_defaults",
+        "neuralnetworks_defaults",
+    ],
+    apex_available: ["//apex_available:platform"],
+    // b/109953668, disable OpenMP
+    // openmp: true,
+    srcs: [
+        "BurstBuilder.cpp",
+        "CompilationBuilder.cpp",
+        "ExecutionBuilder.cpp",
+        "ExecutionCallback.cpp",
+        "ExecutionPlan.cpp",
+        "Manager.cpp",
+        "Memory.cpp",
+        "ModelArgumentInfo.cpp",
+        "ModelBuilder.cpp",
+        "NeuralNetworks.cpp",
+        "TypeManager.cpp",
+    ],
+    static_libs: [
+        "libbase_ndk",
+        "libcrypto_static",
+        "libneuralnetworks_common_cl",
+        "libtextclassifier_hash_static",
+        "neuralnetworks_types_cl",
+    ],
+    shared_libs: [
+        "libnativewindow",
+    ],
+    header_libs: [
+        "libneuralnetworks_headers_ndk",
+    ],
+    export_header_lib_headers: [
+        "libneuralnetworks_headers_ndk",
+    ],
+}
+
 ndk_headers {
     name: "libneuralnetworks_ndk_headers",
     from: "include",
diff --git a/runtime/AppInfoFetcher.h b/runtime/AppInfoFetcher.h
index e4717e6..8d7618f 100644
--- a/runtime/AppInfoFetcher.h
+++ b/runtime/AppInfoFetcher.h
@@ -17,13 +17,7 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_APP_INFO_FETCHER_H
 #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_APP_INFO_FETCHER_H
 
-#include <map>
-#include <set>
 #include <string>
-#include <vector>
-
-#include "HalInterfaces.h"
-#include "Manager.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/CompilationBuilder.cpp b/runtime/CompilationBuilder.cpp
index 5d2d5db..13d61e3 100644
--- a/runtime/CompilationBuilder.cpp
+++ b/runtime/CompilationBuilder.cpp
@@ -29,9 +29,9 @@
 #include "ExecutionBuilder.h"
 #include "ExecutionBurstController.h"
 #include "ExecutionPlan.h"
+#include "LegacyUtils.h"
 #include "Manager.h"
 #include "ModelBuilder.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/Event.h b/runtime/Event.h
index a9c17e9..90d5932 100644
--- a/runtime/Event.h
+++ b/runtime/Event.h
@@ -17,13 +17,13 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EVENT_H
 #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EVENT_H
 
+#include <android-base/logging.h>
 #include <nnapi/IPreparedModel.h>
 
 #include <memory>
 #include <utility>
 
 #include "ExecutionCallback.h"
-#include "HalInterfaces.h"
 
 namespace android::nn {
 
diff --git a/runtime/ExecutionBuilder.cpp b/runtime/ExecutionBuilder.cpp
index b73e4f2..bc3e54e 100644
--- a/runtime/ExecutionBuilder.cpp
+++ b/runtime/ExecutionBuilder.cpp
@@ -18,6 +18,7 @@
 
 #include "ExecutionBuilder.h"
 
+#include <android-base/logging.h>
 #include <nnapi/IPreparedModel.h>
 
 #include <algorithm>
@@ -36,13 +37,12 @@
 #include "ControlFlow.h"
 #include "CpuExecutor.h"
 #include "ExecutionBurstController.h"
-#include "HalInterfaces.h"
+#include "LegacyUtils.h"
 #include "Manager.h"
 #include "ModelArgumentInfo.h"
 #include "ModelBuilder.h"
 #include "Tracing.h"
 #include "TypeManager.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/ExecutionBuilder.h b/runtime/ExecutionBuilder.h
index 62d4cbd..65af1d8 100644
--- a/runtime/ExecutionBuilder.h
+++ b/runtime/ExecutionBuilder.h
@@ -30,7 +30,6 @@
 #include "ControlFlow.h"
 #include "CpuExecutor.h"
 #include "ExecutionCallback.h"
-#include "HalInterfaces.h"
 #include "Memory.h"
 #include "ModelArgumentInfo.h"
 #include "ModelBuilder.h"
diff --git a/runtime/ExecutionCallback.cpp b/runtime/ExecutionCallback.cpp
index fa6e17b..b2ac2ae 100644
--- a/runtime/ExecutionCallback.cpp
+++ b/runtime/ExecutionCallback.cpp
@@ -18,7 +18,6 @@
 
 #include "ExecutionCallback.h"
 
-#include <Utils.h>
 #include <android-base/logging.h>
 
 #include <limits>
diff --git a/runtime/ExecutionPlan.cpp b/runtime/ExecutionPlan.cpp
index 23740f6..47fbcec 100644
--- a/runtime/ExecutionPlan.cpp
+++ b/runtime/ExecutionPlan.cpp
@@ -19,7 +19,6 @@
 #include "ExecutionPlan.h"
 
 #include <fcntl.h>
-#include <openssl/sha.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 
@@ -44,6 +43,7 @@
 #include "ExecutionBurstController.h"
 #include "ExecutionCallback.h"
 #include "GraphDump.h"
+#include "LegacyUtils.h"
 #include "Manager.h"
 #include "MetaModel.h"
 #include "ModelBuilder.h"
@@ -51,7 +51,6 @@
 #include "TokenHasher.h"
 #include "Tracing.h"
 #include "TypeManager.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/ExecutionPlan.h b/runtime/ExecutionPlan.h
index df31a3f..e9b06a9 100644
--- a/runtime/ExecutionPlan.h
+++ b/runtime/ExecutionPlan.h
@@ -21,7 +21,6 @@
 
 #include <android-base/logging.h>
 #include <nnapi/Types.h>
-#include <openssl/sha.h>
 
 #include <algorithm>
 #include <chrono>
@@ -36,12 +35,12 @@
 #include <variant>
 #include <vector>
 
+#include "LegacyUtils.h"
 #include "Memory.h"
 #include "ModelArgumentInfo.h"
 #include "ModelBuilder.h"
 #include "NeuralNetworks.h"
 #include "TokenHasher.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
index 98d0ad4..b551b2e 100644
--- a/runtime/Manager.cpp
+++ b/runtime/Manager.cpp
@@ -18,13 +18,13 @@
 
 #include "Manager.h"
 
-#include <android-base/properties.h>
-#include <build/version.h>
-#include <cutils/native_handle.h>
+#include <CpuExecutor.h>
+#include <ExecutionBurstController.h>
+#include <LegacyUtils.h>
 #include <nnapi/IDevice.h>
 #include <nnapi/IPreparedModel.h>
-#include <nnapi/hal/1.3/Buffer.h>
-#include <nnapi/hal/Service.h>
+#include <nnapi/SharedMemory.h>
+#include <nnapi/Validation.h>
 
 #include <algorithm>
 #include <functional>
@@ -34,17 +34,27 @@
 #include <utility>
 #include <vector>
 
-#include "AppInfoFetcher.h"
-#include "CpuExecutor.h"
-#include "ExecutionBurstController.h"
 #include "ExecutionCallback.h"
-#include "HalInterfaces.h"
 #include "Memory.h"
 #include "MetaModel.h"
 #include "ModelArgumentInfo.h"
 #include "Tracing.h"
 #include "TypeManager.h"
-#include "Utils.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <build/version.h>
+#include <cutils/native_handle.h>
+#include <nnapi/hal/1.3/Buffer.h>
+#include <nnapi/hal/Service.h>
+
+#include "AppInfoFetcher.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
+#ifndef NN_NO_BURST
+#include <HalInterfaces.h>
+#include <LegacyHalUtils.h>
+#include <android-base/properties.h>
+#endif  // NN_NO_BURST
 
 namespace android {
 namespace nn {
@@ -122,6 +132,7 @@
 #endif  // NN_DEBUGGABLE
 };
 
+#ifndef NN_NO_BURST
 // This is the amount of time the ExecutionBurstController should spend polling
 // the FMQ to see if it has data available before it should fall back to
 // waiting on the futex.
@@ -137,6 +148,7 @@
     return std::chrono::microseconds{defaultPollingTimeWindow};
 #endif  // NN_DEBUGGABLE
 }
+#endif  // NN_NO_BURST
 
 // A RuntimePreparedModel with underlying IPreparedModel instance return by actual driver.
 class DriverPreparedModel : public RuntimePreparedModel {
@@ -167,6 +179,7 @@
 
     std::shared_ptr<ExecutionBurstController> configureExecutionBurst(
             bool preferPowerOverLatency) const override {
+#ifndef NN_NO_BURST
         std::any resource = mPreparedModel->getUnderlyingResource();
         sp<V1_2::IPreparedModel> preparedModel;
         if (auto* preparedModelV1_3 = std::any_cast<sp<V1_3::IPreparedModel>>(&resource)) {
@@ -179,6 +192,11 @@
         const auto pollingTimeWindow =
                 (preferPowerOverLatency ? std::chrono::microseconds{0} : getPollingTimeWindow());
         return ExecutionBurstController::create(preparedModel, pollingTimeWindow);
+#else
+        (void)preferPowerOverLatency;
+        LOG(ERROR) << "DriverPreparedModel::configureExecutionBurst: built without burst support";
+        return nullptr;
+#endif  // NN_NO_BURST
     }
 
    private:
@@ -549,6 +567,7 @@
     // compute using burst if present
     const bool burstCompute = (burstController != nullptr);
     bool burstFallback = true;
+#ifndef NN_NO_BURST
     if (burstCompute) {
         const bool compliant = compliantWithV1_2(convertToV1_3(request));
         if (compliant) {
@@ -570,6 +589,9 @@
             timing = uncheckedConvert(halTiming);
         }
     }
+#else
+    CHECK(!burstCompute) << "built without burst";
+#endif  // NN_NO_BURST
 
     // compute from IPreparedModel if either:
     // (1) burst was not supplied, or
@@ -675,7 +697,7 @@
     SyncFence syncFence = SyncFence::createAsSignaled();
     ExecuteFencedInfoCallback executeFencedInfoCallback = nullptr;
     Timing timing = {};
-    if (mDevice->getFeatureLevel() >= __ANDROID_API_R__) {
+    if (mDevice->getFeatureLevel() >= kHalVersionV1_3ToApi.android) {
         auto result = mPreparedModel->executeFenced(request, waitForHandles, measure, deadline,
                                                     loopTimeoutDuration, timeoutDurationAfterFence);
         if (!result.ok()) {
@@ -784,7 +806,11 @@
     CpuDevice() = default;
     const int64_t kFeatureLevel = __ANDROID_API__;
     const std::string kName = "nnapi-reference";
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
     const std::string kVersionString = build::GetBuildNumber();
+#else
+    const std::string kVersionString = "UNKNOWN";
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
     // Since the performance is a ratio compared to the CPU performance,
     // by definition the performance of the CPU is 1.0.
     const Capabilities::PerformanceInfo kPerformance = {.execTime = 1.0f, .powerUsage = 1.0f};
@@ -1024,26 +1050,39 @@
     return driverDevice;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+std::vector<SharedDevice> getDevices() {
+    return hal::getDevices();
+}
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 void DeviceManager::findAvailableDevices() {
     VLOG(MANAGER) << "findAvailableDevices";
 
     // register driver devices
-    std::vector<SharedDevice> devices = hal::getDevices();
+    std::vector<SharedDevice> devices = getDevices();
     for (const auto& device : devices) {
         VLOG(MANAGER) << "Found interface " << device->getName();
         registerDevice(device);
     }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
     // register CPU fallback device
     mDevices.push_back(CpuDevice::get());
     mDevicesCpuOnly.push_back(CpuDevice::get());
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 }
 
 static bool updatableDriversAreAllowed() {
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
     const auto& appInfo = AppInfoFetcher::get()->getAppInfo();
     const bool currentProcessIsOnThePlatform =
             appInfo.appIsSystemApp || appInfo.appIsOnVendorImage || appInfo.appIsOnProductImage;
     return !currentProcessIsOnThePlatform;
+#else
+    // The concept does not exist in the compatibility library build.
+    return true;
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 }
 
 void DeviceManager::registerDevice(const SharedDevice& device) {
diff --git a/runtime/Manager.h b/runtime/Manager.h
index 33dabcd..2c51fc9 100644
--- a/runtime/Manager.h
+++ b/runtime/Manager.h
@@ -18,6 +18,7 @@
 #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MANAGER_H
 
 #include <android-base/macros.h>
+#include <nnapi/IDevice.h>
 #include <nnapi/IPreparedModel.h>
 
 #include <map>
@@ -29,9 +30,8 @@
 #include <vector>
 
 #include "ExecutionCallback.h"
-#include "HalInterfaces.h"
+#include "LegacyUtils.h"
 #include "Memory.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
@@ -211,6 +211,8 @@
     bool mStrictSlicing = false;
 };
 
+std::vector<SharedDevice> getDevices();
+
 }  // namespace nn
 }  // namespace android
 
diff --git a/runtime/Memory.cpp b/runtime/Memory.cpp
index e3d2c24..0d91481 100644
--- a/runtime/Memory.cpp
+++ b/runtime/Memory.cpp
@@ -18,14 +18,13 @@
 
 #include "Memory.h"
 
+#include <CpuExecutor.h>
+#include <ExecutionBurstController.h>
+#include <LegacyUtils.h>
 #include <android-base/scopeguard.h>
-#include <android/hardware_buffer.h>
-#include <cutils/native_handle.h>
-#include <hidl/HidlSupport.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
-#include <vndk/hardware_buffer.h>
 
 #include <algorithm>
 #include <memory>
@@ -35,11 +34,13 @@
 #include <vector>
 
 #include "CompilationBuilder.h"
-#include "CpuExecutor.h"
-#include "ExecutionBurstController.h"
 #include "Manager.h"
 #include "TypeManager.h"
-#include "Utils.h"
+
+#ifndef NN_NO_AHWB
+#include <android/hardware_buffer.h>
+#include <vndk/hardware_buffer.h>
+#endif  // NN_NO_AHWB
 
 namespace android {
 namespace nn {
@@ -193,11 +194,13 @@
 RuntimeMemory::RuntimeMemory(SharedBuffer buffer) : kBuffer(std::move(buffer)) {}
 
 RuntimeMemory::~RuntimeMemory() {
+#ifndef NN_NO_BURST
     for (const auto& [ptr, weakBurst] : mUsedBy) {
         if (const std::shared_ptr<ExecutionBurstController> burst = weakBurst.lock()) {
             burst->freeMemory(getKey());
         }
     }
+#endif  // NN_NO_BURST
 }
 
 Request::MemoryPool RuntimeMemory::getMemoryPool() const {
@@ -466,9 +469,13 @@
         LOG(INFO) << "MemoryBuilder::finish -- cannot handle multiple devices.";
         mAllocator = nullptr;
     }
+#ifndef NN_NO_AHWB
     mSupportsAhwb = std::all_of(devices.begin(), devices.end(), [](const auto* device) {
-        return device->getFeatureLevel() >= __ANDROID_API_R__;
+        return device->getFeatureLevel() >= kHalVersionV1_3ToApi.android;
     });
+#else
+    mSupportsAhwb = false;
+#endif  // NN_NO_AHWB
     mShouldFallback = std::none_of(mRoles.begin(), mRoles.end(), [](const auto& role) {
         const auto* cb = std::get<const CompilationBuilder*>(role);
         return cb->createdWithExplicitDeviceList();
diff --git a/runtime/Memory.h b/runtime/Memory.h
index b69fbed..573ac0b 100644
--- a/runtime/Memory.h
+++ b/runtime/Memory.h
@@ -23,7 +23,6 @@
 #include <nnapi/SharedMemory.h>
 #include <nnapi/Validation.h>
 #include <sys/mman.h>
-#include <vndk/hardware_buffer.h>
 
 #include <algorithm>
 #include <map>
@@ -36,8 +35,8 @@
 #include <vector>
 
 #include "CpuExecutor.h"
+#include "LegacyUtils.h"
 #include "NeuralNetworks.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/ModelArgumentInfo.cpp b/runtime/ModelArgumentInfo.cpp
index a6a8908..65e132a 100644
--- a/runtime/ModelArgumentInfo.cpp
+++ b/runtime/ModelArgumentInfo.cpp
@@ -22,10 +22,9 @@
 #include <utility>
 #include <vector>
 
-#include "HalInterfaces.h"
+#include "LegacyUtils.h"
 #include "NeuralNetworks.h"
 #include "TypeManager.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/ModelArgumentInfo.h b/runtime/ModelArgumentInfo.h
index d0e2bb0..a8cc858 100644
--- a/runtime/ModelArgumentInfo.h
+++ b/runtime/ModelArgumentInfo.h
@@ -20,8 +20,8 @@
 #include <utility>
 #include <vector>
 
+#include "LegacyUtils.h"
 #include "NeuralNetworks.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/ModelBuilder.cpp b/runtime/ModelBuilder.cpp
index 937b1ed..94ba3d7 100644
--- a/runtime/ModelBuilder.cpp
+++ b/runtime/ModelBuilder.cpp
@@ -27,9 +27,9 @@
 
 #include "CompilationBuilder.h"
 #include "GraphDump.h"
+#include "LegacyUtils.h"
 #include "Manager.h"
 #include "TypeManager.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/ModelBuilder.h b/runtime/ModelBuilder.h
index e89dc63..4e8bd4a 100644
--- a/runtime/ModelBuilder.h
+++ b/runtime/ModelBuilder.h
@@ -23,9 +23,9 @@
 #include <memory>
 #include <vector>
 
+#include "LegacyUtils.h"
 #include "Memory.h"
 #include "NeuralNetworks.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index 56e2165..4d2f484 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -22,8 +22,11 @@
 
 #include "NeuralNetworks.h"
 
+#include <ControlFlow.h>
+#include <LegacyUtils.h>
+#include <MetaModel.h>
+#include <Tracing.h>
 #include <nnapi/Types.h>
-#include <vndk/hardware_buffer.h>
 
 #include <algorithm>
 #include <cstddef>
@@ -33,18 +36,14 @@
 
 #include "BurstBuilder.h"
 #include "CompilationBuilder.h"
-#include "ControlFlow.h"
 #include "Event.h"
 #include "ExecutionBuilder.h"
 #include "ExecutionCallback.h"
 #include "Manager.h"
 #include "Memory.h"
-#include "MetaModel.h"
 #include "ModelBuilder.h"
 #include "NeuralNetworksExtensions.h"
 #include "NeuralNetworksOEM.h"
-#include "Tracing.h"
-#include "Utils.h"
 
 using namespace android::nn;
 
diff --git a/runtime/TypeManager.cpp b/runtime/TypeManager.cpp
index ded4d6b..dec2c6a 100644
--- a/runtime/TypeManager.cpp
+++ b/runtime/TypeManager.cpp
@@ -18,11 +18,13 @@
 
 #include "TypeManager.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <PackageInfo.h>
-#include <android-base/file.h>
-#include <android-base/properties.h>
 #include <binder/IServiceManager.h>
 #include <procpartition/procpartition.h>
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+#include <android-base/file.h>
+#include <android-base/properties.h>
 
 #include <algorithm>
 #include <limits>
@@ -32,8 +34,6 @@
 #include <string_view>
 #include <vector>
 
-#include "Utils.h"
-
 namespace android {
 namespace nn {
 
@@ -59,6 +59,8 @@
     return true;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+
 // Property for disabling NNAPI vendor extensions on product image (used on GSI /product image,
 // which can't use NNAPI vendor extensions).
 const char kVExtProductDeny[] = "ro.nnapi.extensions.deny_on_product";
@@ -110,17 +112,24 @@
 #endif  // __BIONIC__
 }
 
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 }  // namespace
 
 TypeManager::TypeManager() {
     VLOG(MANAGER) << "TypeManager::TypeManager";
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
     mExtensionsAllowed = TypeManager::isExtensionsUseAllowed(
             AppInfoFetcher::get()->getAppInfo(), isNNAPIVendorExtensionsUseAllowedInProductImage(),
             getVendorExtensionAllowlistedApps());
+#else
+    mExtensionsAllowed = true;
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
     VLOG(MANAGER) << "NNAPI Vendor extensions enabled: " << mExtensionsAllowed;
     findAvailableExtensions();
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool TypeManager::isExtensionsUseAllowed(const AppInfoFetcher::AppInfo& appPackageInfo,
                                          bool useOnProductImageEnabled,
                                          const std::vector<std::string>& allowlist) {
@@ -161,6 +170,7 @@
     }
     return false;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 void TypeManager::findAvailableExtensions() {
     for (const std::shared_ptr<Device>& device : mDeviceManager->getDrivers()) {
diff --git a/runtime/TypeManager.h b/runtime/TypeManager.h
index f0aaa74..ba67ec4 100644
--- a/runtime/TypeManager.h
+++ b/runtime/TypeManager.h
@@ -22,10 +22,12 @@
 #include <string>
 #include <vector>
 
-#include "AppInfoFetcher.h"
-#include "HalInterfaces.h"
 #include "Manager.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "AppInfoFetcher.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 
@@ -108,6 +110,7 @@
     // available devices.
     void forTest_reset() { *this = TypeManager(); }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
     // Check if NNAPI Vendor extensions are usable in the process with the given app
     // and supplemental information.
     //
@@ -117,6 +120,7 @@
     static bool isExtensionsUseAllowed(const AppInfoFetcher::AppInfo& appPackageInfo,
                                        bool useOnProductImageEnabled,
                                        const std::vector<std::string>& allowlist);
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
    private:
     TypeManager();
diff --git a/runtime/test/TestExecution.cpp b/runtime/test/TestExecution.cpp
index ed248fe..b286234 100644
--- a/runtime/test/TestExecution.cpp
+++ b/runtime/test/TestExecution.cpp
@@ -36,7 +36,6 @@
 #include "PreparedModelCallback.h"
 #include "SampleDriver.h"
 #include "TestNeuralNetworksWrapper.h"
-#include "Utils.h"
 #include "ValidateHal.h"
 
 namespace android {
diff --git a/runtime/test/TestIntrospectionControl.cpp b/runtime/test/TestIntrospectionControl.cpp
index 6e70547..078b8f4 100644
--- a/runtime/test/TestIntrospectionControl.cpp
+++ b/runtime/test/TestIntrospectionControl.cpp
@@ -37,7 +37,6 @@
 #include "NeuralNetworksOEM.h"
 #include "SampleDriver.h"
 #include "TestNeuralNetworksWrapper.h"
-#include "Utils.h"
 #include "ValidateHal.h"
 
 namespace {
diff --git a/runtime/test/TestPartitioningRandom.cpp b/runtime/test/TestPartitioningRandom.cpp
index fa24f17..ce76421 100644
--- a/runtime/test/TestPartitioningRandom.cpp
+++ b/runtime/test/TestPartitioningRandom.cpp
@@ -43,7 +43,6 @@
 #include "NeuralNetworks.h"
 #include "SampleDriver.h"
 #include "TestNeuralNetworksWrapper.h"
-#include "Utils.h"
 #include "ValidateHal.h"
 
 // Uncomment the following line to generate some debugging output that
@@ -1140,7 +1139,7 @@
     if (compilationResult == Result::OP_FAILED && hasUnknownDimensions &&
         cNoFallback.getExecutionPlan().hasDynamicTemporaries() &&
         std::any_of(devices.begin(), devices.end(), [](const std::shared_ptr<Device>& device) {
-            return device->getFeatureLevel() < __ANDROID_API_Q__;
+            return device->getFeatureLevel() < nn::kHalVersionV1_2ToApi.android;
         })) {
         ASSERT_EQ(cWithFallback.setPartitioning(DeviceManager::kPartitioningWithFallback),
                   Result::NO_ERROR);
diff --git a/runtime/test/android_fuzzing/FuzzHarness.cpp b/runtime/test/android_fuzzing/FuzzHarness.cpp
index 17dac3a..55ff1e6 100644
--- a/runtime/test/android_fuzzing/FuzzHarness.cpp
+++ b/runtime/test/android_fuzzing/FuzzHarness.cpp
@@ -20,6 +20,7 @@
 #include <algorithm>
 
 #include "Converter.h"
+#include "LegacyUtils.h"
 #include "Model.pb.h"
 #include "TestHarness.h"
 
diff --git a/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp b/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp
index 92e9e47..b740f89 100644
--- a/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp
+++ b/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp
@@ -28,7 +28,6 @@
 #include "NeuralNetworksWrapperExtensions.h"
 #include "TestNeuralNetworksWrapper.h"
 #include "TypeManager.h"
-#include "Utils.h"
 #include "ValidateHal.h"
 
 namespace android {