Add support library compatibility layer

This change provides a new "libneuralnetworks_cl" static library that
contains a version of the NNAPI runtime without non-NDK dependencies.
Vendors can link against this library to create support library drivers.

This change introduces new preprocessor symbols:
- NN_COMPATIBILITY_LIBRARY_BUILD
- NN_NO_AHWB
- NN_NO_BURST

The last two temporarily limit corresponding runtime features and should
be removed by later changes.

If NN_COMPATIBILITY_LIBRARY_BUILD is defined, then NN_NO_AHWB and
NN_NO_BURST should be defined and NN_INCLUDE_CPU_IMPLEMENTATION should
be undefined.

Bug: 160667944
Bug: 170289677
Test: NNT_static
Change-Id: I5685d0e46dc9461d62815f61dddd5fc7aedf3762
Merged-In: I5685d0e46dc9461d62815f61dddd5fc7aedf3762
(cherry picked from commit 8c4917bbc4c123778d9c4d0b9a5df79897fc4051)
diff --git a/common/operations/Activation.cpp b/common/operations/Activation.cpp
index 651cd02..c4c4089 100644
--- a/common/operations/Activation.cpp
+++ b/common/operations/Activation.cpp
@@ -16,21 +16,24 @@
 
 #define LOG_TAG "Operations"
 
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "ActivationFunctor.h"
+#include "OperationResolver.h"
+#include "OperationsUtils.h"
+#include "Tracing.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
 #include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
 #include <tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h>
 #include <tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h>
 #include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
 
-#include <algorithm>
-#include <limits>
-#include <vector>
-
-#include "ActivationFunctor.h"
 #include "CpuOperationUtils.h"
-#include "OperationResolver.h"
-#include "OperationsUtils.h"
-#include "Tracing.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -43,6 +46,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -352,6 +356,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(OperationType opType, const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -399,6 +404,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(OperationType opType, IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     if (opType != OperationType::HARD_SWISH) {
@@ -616,6 +622,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation TANH";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace activation
 
diff --git a/common/operations/BidirectionalSequenceLSTM.cpp b/common/operations/BidirectionalSequenceLSTM.cpp
index 6cf095b..6be67b0 100644
--- a/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/common/operations/BidirectionalSequenceLSTM.cpp
@@ -18,6 +18,8 @@
 
 #include "BidirectionalSequenceLSTM.h"
 
+#include <tensorflow/lite/kernels/internal/tensor_utils.h>
+
 #include <algorithm>
 #include <vector>
 
diff --git a/common/operations/BidirectionalSequenceLSTM.h b/common/operations/BidirectionalSequenceLSTM.h
index 7077d3b..d697867 100644
--- a/common/operations/BidirectionalSequenceLSTM.h
+++ b/common/operations/BidirectionalSequenceLSTM.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_BIDIRECTIONAL_SEQUENCE_LSTM_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_BIDIRECTIONAL_SEQUENCE_LSTM_H
 
-#include <tensorflow/lite/kernels/internal/tensor_utils.h>
-
 #include <algorithm>
 #include <cmath>
 #include <vector>
diff --git a/common/operations/BidirectionalSequenceRNN.cpp b/common/operations/BidirectionalSequenceRNN.cpp
index 5a020d1..3a63bdb 100644
--- a/common/operations/BidirectionalSequenceRNN.cpp
+++ b/common/operations/BidirectionalSequenceRNN.cpp
@@ -58,6 +58,7 @@
 constexpr uint32_t kFwOutputHiddenStateTensor = 2;
 constexpr uint32_t kBwOutputHiddenStateTensor = 3;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -312,6 +313,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -341,6 +343,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     const bool mergeOutputs = context->getInputValue<bool>(kMergeOutputsParam);
     const int32_t numOutputs = context->getNumOutputs();
@@ -468,6 +471,7 @@
     }
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace bidirectional_sequence_rnn
 
diff --git a/common/operations/Broadcast.cpp b/common/operations/Broadcast.cpp
index a2d5b8a..6854fd0 100644
--- a/common/operations/Broadcast.cpp
+++ b/common/operations/Broadcast.cpp
@@ -18,6 +18,16 @@
 
 #define LOG_TAG "Operations"
 
+#include <algorithm>
+#include <vector>
+
+#include "IndexedShapeWrapper.h"
+#include "OperationResolver.h"
+#include "Tracing.h"
+#include "nnapi/Types.h"
+#include "nnapi/Validation.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/integer_ops/add.h>
 #include <tensorflow/lite/kernels/internal/optimized/integer_ops/mul.h>
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -25,15 +35,8 @@
 #include <tensorflow/lite/kernels/internal/reference/integer_ops/mul.h>
 #include <tensorflow/lite/kernels/internal/types.h>
 
-#include <algorithm>
-#include <vector>
-
 #include "CpuOperationUtils.h"
-#include "IndexedShapeWrapper.h"
-#include "OperationResolver.h"
-#include "Tracing.h"
-#include "nnapi/Types.h"
-#include "nnapi/Validation.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -48,6 +51,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 #define ANDROID_NN_MACRO_DISPATCH(macro)                                \
@@ -433,6 +437,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(OperationType opType, const IOperationValidationContext* context) {
     auto minSupportedVersion = (opType == OperationType::DIV || opType == OperationType::SUB)
@@ -476,6 +481,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input1 = context->getInputShape(kInputTensor1);
     Shape input2 = context->getInputShape(kInputTensor2);
@@ -677,6 +683,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation DIV";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace broadcast
 
diff --git a/common/operations/Concatenation.cpp b/common/operations/Concatenation.cpp
index 8c08fd0..6047927 100644
--- a/common/operations/Concatenation.cpp
+++ b/common/operations/Concatenation.cpp
@@ -16,20 +16,23 @@
 
 #define LOG_TAG "Operations"
 
+#include <algorithm>
+#include <iterator>
+#include <vector>
+
+#include "OperationResolver.h"
+#include "OperationsUtils.h"
+#include "Tracing.h"
+#include "nnapi/Validation.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
 #include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
 #include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
 #include <tensorflow/lite/kernels/internal/types.h>
 
-#include <algorithm>
-#include <iterator>
-#include <vector>
-
 #include "CpuOperationUtils.h"
-#include "OperationResolver.h"
-#include "OperationsUtils.h"
-#include "Tracing.h"
-#include "nnapi/Validation.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -40,6 +43,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -135,6 +139,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     uint32_t inputCount = context->getNumInputs();
@@ -173,6 +178,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     uint32_t numInputs = context->getNumInputs();
     NN_RET_CHECK_GE(numInputs, 2);
@@ -220,6 +226,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace concatenation
 
diff --git a/common/operations/Conv2D.cpp b/common/operations/Conv2D.cpp
index 6d98982..ce3c4db 100644
--- a/common/operations/Conv2D.cpp
+++ b/common/operations/Conv2D.cpp
@@ -16,21 +16,24 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/reference/integer_ops/conv.h>
-#include <tensorflow/lite/kernels/internal/types.h>
-
 #include <algorithm>
 #include <iterator>
 #include <memory>
 #include <vector>
 
-#include "CpuOperationUtils.h"
+#include "LegacyUtils.h"
 #include "OperationResolver.h"
 #include "Operations.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
-#include "Utils.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/reference/integer_ops/conv.h>
+#include <tensorflow/lite/kernels/internal/types.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -126,6 +129,7 @@
     }
 };
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #define ANDROID_NN_CONV_PARAMETERS(Type)                                          \
     uint32_t height = getSizeOfDimension(inputShape, 1);                          \
     uint32_t width = getSizeOfDimension(inputShape, 2);                           \
@@ -523,6 +527,7 @@
 }
 
 #undef ANDROID_NN_CONV_PARAMETERS
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace
 
@@ -627,6 +632,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape filter = context->getInputShape(kFilterTensor);
@@ -791,6 +797,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace conv_2d
 
diff --git a/common/operations/DepthwiseConv2D.cpp b/common/operations/DepthwiseConv2D.cpp
index 64bd7dd..815a860 100644
--- a/common/operations/DepthwiseConv2D.cpp
+++ b/common/operations/DepthwiseConv2D.cpp
@@ -16,17 +16,20 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h>
-#include <tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h>
-
 #include <algorithm>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Operations.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h>
+#include <tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace depthwise_conv_2d {
@@ -40,6 +43,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 struct DepthwiseConv2dParam {
@@ -412,6 +416,7 @@
 #undef ANDROID_NN_DEPTHWISE_CONV_PARAMETERS
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     const uint32_t numInputs = context->getNumInputs();
@@ -510,6 +515,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape filter = context->getInputShape(kFilterTensor);
@@ -674,6 +680,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace depthwise_conv_2d
 
diff --git a/common/operations/ExpandDims.cpp b/common/operations/ExpandDims.cpp
index 2f546c9..435b3c7 100644
--- a/common/operations/ExpandDims.cpp
+++ b/common/operations/ExpandDims.cpp
@@ -18,7 +18,7 @@
 
 #include "ExpandDims.h"
 
-#include "Utils.h"
+#include "LegacyUtils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/operations/FullyConnected.cpp b/common/operations/FullyConnected.cpp
index ab50d31..176425c 100644
--- a/common/operations/FullyConnected.cpp
+++ b/common/operations/FullyConnected.cpp
@@ -14,19 +14,22 @@
  * limitations under the License.
  */
 
-#include "tensorflow/lite/kernels/internal/types.h"
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h>
-#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
-
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h>
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+#include <tensorflow/lite/kernels/internal/types.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace fully_connected {
@@ -44,6 +47,7 @@
 
 namespace {
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 // executionMutex is used to protect concurrent access of non-threadsafe resources
 // like gemmlowp::GemmContext.
 // std::mutex is safe for pthreads on Android.
@@ -176,6 +180,7 @@
 
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 bool validateShapes(const Shape& input, const Shape& weights, const Shape& bias,
                     Shape* output = nullptr) {
@@ -286,6 +291,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape weights = context->getInputShape(kWeightsTensor);
@@ -343,6 +349,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace fully_connected
 
diff --git a/common/operations/GenerateProposals.cpp b/common/operations/GenerateProposals.cpp
index 95e3676..2f30a72 100644
--- a/common/operations/GenerateProposals.cpp
+++ b/common/operations/GenerateProposals.cpp
@@ -23,15 +23,19 @@
 #include <utility>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace bbox_ops {
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 struct BoxEncodingCorner {
@@ -183,6 +187,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace axis_aligned_bbox_transform {
 
@@ -221,6 +226,7 @@
     return Version::ANDROID_Q;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape roiShape = context->getInputShape(kRoiTensor);
     Shape bboxDeltasShape = context->getInputShape(kDeltaTensor);
@@ -322,6 +328,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace axis_aligned_bbox_transform
 
@@ -346,6 +353,7 @@
 constexpr uint32_t kOutputClassTensor = 2;
 constexpr uint32_t kOutputBatchesTensor = 3;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 // TODO(xusongw): Reduce code duplication with hard/soft nms path.
@@ -700,6 +708,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -746,6 +755,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape scoreShape = context->getInputShape(kScoreTensor);
     Shape roiShape = context->getInputShape(kRoiTensor);
@@ -898,6 +908,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace box_with_nms_limit
 
@@ -923,6 +934,7 @@
 constexpr uint32_t kOutputRoiTensor = 1;
 constexpr uint32_t kOutputBatchesTensor = 2;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 void filterBoxes(const float* roiBase, const float* imageInfoBase, float minSize,
@@ -1210,6 +1222,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -1272,6 +1285,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     bool useNchw = context->getInputValue<bool>(kLayoutScalar);
     Shape scoreShape = context->getInputShape(kScoreTensor);
@@ -1401,6 +1415,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace generate_proposals
 
@@ -1430,6 +1445,7 @@
 constexpr uint32_t kOutputClassTensor = 2;
 constexpr uint32_t kOutputDetectionTensor = 3;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 bool detectionPostprocessFloat32(
@@ -1566,6 +1582,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -1598,6 +1615,7 @@
     return Version::ANDROID_Q;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape scoreShape = context->getInputShape(kScoreTensor);
     Shape deltasShape = context->getInputShape(kDeltaTensor);
@@ -1734,6 +1752,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace detection_postprocess
 
diff --git a/common/operations/HeatmapMaxKeypoint.cpp b/common/operations/HeatmapMaxKeypoint.cpp
index 63fc597..b902e08 100644
--- a/common/operations/HeatmapMaxKeypoint.cpp
+++ b/common/operations/HeatmapMaxKeypoint.cpp
@@ -21,11 +21,14 @@
 #include <cmath>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace heatmap_max_keypoint {
@@ -41,6 +44,7 @@
 constexpr uint32_t kOutputScoreTensor = 0;
 constexpr uint32_t kOutputKeypointTensor = 1;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 // This function uses Taylor expansion up to the quatratic term to approximate bicubic
@@ -223,6 +227,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -252,6 +257,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     bool layout = context->getInputValue<bool>(kLayoutScalar);
     Shape heatmapShape = context->getInputShape(kHeatmapTensor);
@@ -355,6 +361,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace heatmap_max_keypoint
 
diff --git a/common/operations/InstanceNormalization.cpp b/common/operations/InstanceNormalization.cpp
index 1a0e488..0da0e05 100644
--- a/common/operations/InstanceNormalization.cpp
+++ b/common/operations/InstanceNormalization.cpp
@@ -19,10 +19,13 @@
 #include <cmath>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace instance_normalization {
@@ -39,6 +42,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -98,6 +102,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -118,6 +123,7 @@
     return Version::ANDROID_Q;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
@@ -148,6 +154,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace instance_normalization
 
diff --git a/common/operations/L2Normalization.cpp b/common/operations/L2Normalization.cpp
index 05682ea..7d9adf4 100644
--- a/common/operations/L2Normalization.cpp
+++ b/common/operations/L2Normalization.cpp
@@ -16,16 +16,19 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h>
-
 #include <algorithm>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace l2_norm {
@@ -39,6 +42,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline bool l2normFloat32Impl(const float* inputData, const Shape& inputShape, int32_t axis,
@@ -195,6 +199,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK(context->getNumInputs() == kNumInputs ||
@@ -228,6 +233,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     const Shape& input = context->getInputShape(kInputTensor);
     int32_t numDimensions = getNumberOfDimensions(input);
@@ -283,6 +289,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace l2_norm
 
diff --git a/common/operations/LSHProjection.cpp b/common/operations/LSHProjection.cpp
index 14d7a79..c3f051d 100644
--- a/common/operations/LSHProjection.cpp
+++ b/common/operations/LSHProjection.cpp
@@ -23,8 +23,8 @@
 #include <memory>
 
 #include "CpuExecutor.h"
+#include "LegacyUtils.h"
 #include "Tracing.h"
-#include "Utils.h"
 #include "nnapi/Types.h"
 
 namespace android {
diff --git a/common/operations/LSTM.cpp b/common/operations/LSTM.cpp
index e64d0c4..5c16ccf 100644
--- a/common/operations/LSTM.cpp
+++ b/common/operations/LSTM.cpp
@@ -18,13 +18,15 @@
 
 #include "LSTM.h"
 
+#include <tensorflow/lite/kernels/internal/tensor_utils.h>
+
 #include <vector>
 
 #include "CpuExecutor.h"
 #include "CpuOperationUtils.h"
+#include "LegacyUtils.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
-#include "Utils.h"
 #include "nnapi/Types.h"
 
 namespace android {
diff --git a/common/operations/LocalResponseNormalization.cpp b/common/operations/LocalResponseNormalization.cpp
index ed16dec..6ad801d 100644
--- a/common/operations/LocalResponseNormalization.cpp
+++ b/common/operations/LocalResponseNormalization.cpp
@@ -16,15 +16,18 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
-
 #include <algorithm>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace local_response_norm {
@@ -42,6 +45,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline bool localResponseNormFloat32Impl(const float* inputData, const Shape& inputShape,
@@ -129,6 +133,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK(context->getNumInputs() == kNumInputs ||
@@ -173,6 +178,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     const Shape& input = context->getInputShape(kInputTensor);
     int32_t numDimensions = getNumberOfDimensions(input);
@@ -195,6 +201,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace local_response_norm
 
diff --git a/common/operations/Multinomial.cpp b/common/operations/Multinomial.cpp
index 6521bc7..4b12f8f 100644
--- a/common/operations/Multinomial.cpp
+++ b/common/operations/Multinomial.cpp
@@ -20,15 +20,21 @@
 
 #include <algorithm>
 #include <limits>
-#include <unsupported/Eigen/CXX11/Tensor>
 #include <vector>
 
 #include "CpuExecutor.h"
-#include "CpuOperationUtils.h"
 #include "Tracing.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/tensor_utils.h>
+
+#include <unsupported/Eigen/CXX11/Tensor>
+
+#include "CpuOperationUtils.h"
 #include "guarded_philox_random.h"
 #include "philox_random.h"
 #include "simple_philox.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
diff --git a/common/operations/Multinomial.h b/common/operations/Multinomial.h
index bdfe587..ba0b114 100644
--- a/common/operations/Multinomial.h
+++ b/common/operations/Multinomial.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_MULTINOMIAL_H
 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_MULTINOMIAL_H
 
-#include <tensorflow/lite/kernels/internal/tensor_utils.h>
-
 #include <algorithm>
 #include <cmath>
 #include <vector>
diff --git a/common/operations/PRelu.cpp b/common/operations/PRelu.cpp
index 88e38fc..60d7210 100644
--- a/common/operations/PRelu.cpp
+++ b/common/operations/PRelu.cpp
@@ -16,7 +16,9 @@
 
 #define LOG_TAG "Operations"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 #include <algorithm>
 #include <vector>
@@ -39,6 +41,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 template <typename T>
 inline bool eval(const std::function<T(const T&, const T&)>& func, const T* aData,
                  const Shape& aShape, const T* bData, const Shape& bShape, T* outputData,
@@ -94,6 +97,7 @@
             },
             aData, aShape, bData, bShape, outputData, outputShape);
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -113,6 +117,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape alpha = context->getInputShape(kAlphaTensor);
@@ -166,6 +171,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace prelu
 
diff --git a/common/operations/Pooling.cpp b/common/operations/Pooling.cpp
index 6cd2864..20a5c76 100644
--- a/common/operations/Pooling.cpp
+++ b/common/operations/Pooling.cpp
@@ -16,16 +16,19 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h>
-
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 #include "nnapi/Validation.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 
@@ -36,6 +39,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 struct PoolingParam {
@@ -287,6 +291,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(OperationType opType, const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
@@ -352,6 +357,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
@@ -431,6 +437,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation MAX_POOL_2D";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 #undef POOLING_DISPATCH_INPUT_TYPE
 
diff --git a/common/operations/QLSTM.cpp b/common/operations/QLSTM.cpp
index e8c4f90..4dad7f7 100644
--- a/common/operations/QLSTM.cpp
+++ b/common/operations/QLSTM.cpp
@@ -20,7 +20,9 @@
 
 #include "CpuExecutor.h"
 #include "OperationsUtils.h"
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include "QuantUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -360,6 +362,7 @@
            context->setOutputShape(kOutputTensor, outputShape);
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool execute(IOperationExecutionContext* context) {
     // Gets the inputs.
     const Shape inputShape = context->getInputShape(kInputTensor);
@@ -792,6 +795,7 @@
 
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace qlstm
 
diff --git a/common/operations/Rank.cpp b/common/operations/Rank.cpp
index f636341..23f5c1e 100644
--- a/common/operations/Rank.cpp
+++ b/common/operations/Rank.cpp
@@ -16,9 +16,9 @@
 
 #define LOG_TAG "Operations"
 
+#include "LegacyUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/operations/Reduce.cpp b/common/operations/Reduce.cpp
index 9eb1956..a73b652 100644
--- a/common/operations/Reduce.cpp
+++ b/common/operations/Reduce.cpp
@@ -16,7 +16,9 @@
 
 #define LOG_TAG "Operations"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 #include <algorithm>
 #include <limits>
@@ -43,6 +45,7 @@
 constexpr _Float16 kFloat16Max = 65504;
 constexpr _Float16 kFloat16Lowest = -kFloat16Max;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -65,6 +68,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validateProdSum(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -122,6 +126,7 @@
     return Version::ANDROID_Q;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape inputShape = context->getInputShape(kInputTensor);
     const uint32_t inputRank = getNumberOfDimensions(inputShape);
@@ -247,6 +252,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_ALL";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace reduce
 
diff --git a/common/operations/Reshape.cpp b/common/operations/Reshape.cpp
index 76effb8..35dee5f 100644
--- a/common/operations/Reshape.cpp
+++ b/common/operations/Reshape.cpp
@@ -24,9 +24,9 @@
 #include <vector>
 
 #include "CpuOperationUtils.h"
+#include "LegacyUtils.h"
 #include "Operations.h"
 #include "Tracing.h"
-#include "Utils.h"
 
 namespace android {
 namespace nn {
diff --git a/common/operations/ResizeImageOps.cpp b/common/operations/ResizeImageOps.cpp
index 733bedb..ea3a7dd 100644
--- a/common/operations/ResizeImageOps.cpp
+++ b/common/operations/ResizeImageOps.cpp
@@ -16,17 +16,20 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
-
 #include <algorithm>
 #include <functional>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 #include "nnapi/Validation.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 
@@ -45,6 +48,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline float scaleHalfPixel(const int x, const float scale) {
@@ -168,6 +172,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(OperationType opType, const IOperationValidationContext* context) {
     const auto numInputs = context->getNumInputs();
@@ -221,6 +226,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(OperationType opType, IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
@@ -307,6 +313,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace resize_image
 
diff --git a/common/operations/RoiAlign.cpp b/common/operations/RoiAlign.cpp
index 3ca64f5..a6bba60 100644
--- a/common/operations/RoiAlign.cpp
+++ b/common/operations/RoiAlign.cpp
@@ -16,18 +16,21 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/common.h>
-
 #include <algorithm>
 #include <cfloat>
 #include <cmath>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/common.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace roi_align {
@@ -49,6 +52,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T_Input, typename T_Roi>
@@ -336,6 +340,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -378,6 +383,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     bool useNchw = context->getInputValue<bool>(kLayoutScalar);
     Shape input = context->getInputShape(kInputTensor);
@@ -500,6 +506,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace roi_align
 
diff --git a/common/operations/RoiPooling.cpp b/common/operations/RoiPooling.cpp
index 26e2213..15fc16c 100644
--- a/common/operations/RoiPooling.cpp
+++ b/common/operations/RoiPooling.cpp
@@ -21,11 +21,14 @@
 #include <cmath>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace roi_pooling {
@@ -45,6 +48,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T_Input, typename T_Roi>
@@ -183,6 +187,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -221,6 +226,7 @@
     }
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     bool useNchw = context->getInputValue<bool>(kLayoutScalar);
     Shape input = context->getInputShape(kInputTensor);
@@ -322,6 +328,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace roi_pooling
 
diff --git a/common/operations/Slice.cpp b/common/operations/Slice.cpp
index db47419..9b797d0 100644
--- a/common/operations/Slice.cpp
+++ b/common/operations/Slice.cpp
@@ -18,10 +18,13 @@
 
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "IndexedShapeWrapper.h"
 #include "OperationResolver.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace slice {
@@ -36,6 +39,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -77,6 +81,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -101,6 +106,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     const Shape& inputShape = context->getInputShape(kInputTensor);
     const int32_t n_dims = getNumberOfDimensions(inputShape);
@@ -186,6 +192,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace slice
 
diff --git a/common/operations/Softmax.cpp b/common/operations/Softmax.cpp
index 3e65d85..56de331 100644
--- a/common/operations/Softmax.cpp
+++ b/common/operations/Softmax.cpp
@@ -16,19 +16,22 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
-#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
-
 #include <algorithm>
 #include <cfloat>
 #include <limits>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 #include "nnapi/Validation.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
+#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 
@@ -44,6 +47,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline bool softmaxSlowFloat32(const float* inputData, const Shape& inputShape, const float beta,
@@ -226,6 +230,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK(context->getNumInputs() == kNumInputs ||
@@ -263,6 +268,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     float beta = (input.type == OperandType::TENSOR_FLOAT16)
@@ -310,6 +316,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace softmax
 
diff --git a/common/operations/Squeeze.cpp b/common/operations/Squeeze.cpp
index 2fe8eb8..c652005 100644
--- a/common/operations/Squeeze.cpp
+++ b/common/operations/Squeeze.cpp
@@ -66,6 +66,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     // Only the squeeze dims tensor can be omitted.
     NN_RET_CHECK(!context->isOmittedInput(kInputTensor));
@@ -137,6 +138,8 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for SQUEEZE op.";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 }  // namespace squeeze
 
 NN_REGISTER_OPERATION(SQUEEZE, "SQUEEZE", squeeze::validate, squeeze::prepare, squeeze::execute,
diff --git a/common/operations/StridedSlice.cpp b/common/operations/StridedSlice.cpp
index fd66ca7..e13c9b0 100644
--- a/common/operations/StridedSlice.cpp
+++ b/common/operations/StridedSlice.cpp
@@ -18,15 +18,18 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
-
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Operations.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace strided_slice {
@@ -43,6 +46,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -95,6 +99,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -132,6 +137,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     // StridedSlice op only supports 1D-4D input arrays.
     const Shape& inputShape = context->getInputShape(kInputTensor);
@@ -213,6 +219,8 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for STRIDED_SLICE op.";
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 }  // namespace strided_slice
 
 NN_REGISTER_OPERATION(STRIDED_SLICE, "STRIDED_SLICE", strided_slice::validate,
diff --git a/common/operations/Transpose.cpp b/common/operations/Transpose.cpp
index 0e61575..0f2ae2a 100644
--- a/common/operations/Transpose.cpp
+++ b/common/operations/Transpose.cpp
@@ -16,14 +16,17 @@
 
 #define LOG_TAG "Operations"
 
+#include <vector>
+
+#include "OperationResolver.h"
+#include "Tracing.h"
+
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
 #include <tensorflow/lite/kernels/internal/reference/reference_ops.h>
 
-#include <vector>
-
 #include "CpuOperationUtils.h"
-#include "OperationResolver.h"
-#include "Tracing.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 namespace android {
 namespace nn {
@@ -38,6 +41,7 @@
 constexpr uint32_t kNumOutputs = 1;
 constexpr uint32_t kOutputTensor = 0;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -68,6 +72,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -93,6 +98,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     // Only the permutation tensor can be omitted.
     NN_RET_CHECK(!context->isOmittedInput(kInputTensor));
@@ -168,6 +174,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace transpose
 
diff --git a/common/operations/TransposeConv2D.cpp b/common/operations/TransposeConv2D.cpp
index 002df27..0561e02 100644
--- a/common/operations/TransposeConv2D.cpp
+++ b/common/operations/TransposeConv2D.cpp
@@ -16,18 +16,21 @@
 
 #define LOG_TAG "Operations"
 
-#include <tensorflow/lite/kernels/internal/common.h>
-
 #include <algorithm>
 #include <cfloat>
 #include <cmath>
 #include <memory>
 #include <vector>
 
-#include "CpuOperationUtils.h"
 #include "OperationResolver.h"
 #include "Tracing.h"
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
+#include <tensorflow/lite/kernels/internal/common.h>
+
+#include "CpuOperationUtils.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+
 namespace android {
 namespace nn {
 namespace transpose_conv_2d {
@@ -104,6 +107,7 @@
     }
 };
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #define ANDROID_NN_TRANSPOSE_CONV_PARAMETERS                                    \
     uint32_t numBatches = getSizeOfDimension(inputShape, 0);                    \
     uint32_t inputHeight = getSizeOfDimension(inputShape, 1);                   \
@@ -430,6 +434,7 @@
 }
 
 #undef ANDROID_NN_TRANSPOSE_CONV_PARAMETERS
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace
 
@@ -479,6 +484,7 @@
     return minSupportedVersion;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape filter = context->getInputShape(kFilterTensor);
@@ -620,6 +626,7 @@
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
     }
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace transpose_conv_2d
 
diff --git a/common/operations/UnidirectionalSequenceLSTM.cpp b/common/operations/UnidirectionalSequenceLSTM.cpp
index dc734e8..7d34023 100644
--- a/common/operations/UnidirectionalSequenceLSTM.cpp
+++ b/common/operations/UnidirectionalSequenceLSTM.cpp
@@ -19,7 +19,9 @@
 #include <vector>
 
 #include "IndexedShapeWrapper.h"
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 #include "LSTM.h"
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 #include "OperationResolver.h"
 #include "OperationsUtils.h"
 
@@ -85,6 +87,7 @@
 constexpr uint32_t kOutputStateOutTensor = 1;
 constexpr uint32_t kCellStateOutTensor = 2;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 inline bool hasTensor(IOperationExecutionContext* context, const uint32_t tensor) {
@@ -111,6 +114,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -166,6 +170,7 @@
     return minVersionSupported;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     // Check that none of the required inputs are omitted
     const std::vector<int> requiredInputs = {
@@ -512,6 +517,7 @@
     }
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace unidirectional_sequence_lstm
 
diff --git a/common/operations/UnidirectionalSequenceRNN.cpp b/common/operations/UnidirectionalSequenceRNN.cpp
index eaf60ed..a9ad503 100644
--- a/common/operations/UnidirectionalSequenceRNN.cpp
+++ b/common/operations/UnidirectionalSequenceRNN.cpp
@@ -42,6 +42,7 @@
 constexpr uint32_t kOutputTensor = 0;
 constexpr uint32_t kStateOutputTensor = 1;
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 namespace {
 
 template <typename T>
@@ -125,6 +126,7 @@
 }
 
 }  // namespace
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 Result<Version> validate(const IOperationValidationContext* context) {
     NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
@@ -147,6 +149,7 @@
     return minVersionSupported;
 }
 
+#ifndef NN_COMPATIBILITY_LIBRARY_BUILD
 bool prepare(IOperationExecutionContext* context) {
     Shape input = context->getInputShape(kInputTensor);
     Shape weights = context->getInputShape(kWeightsTensor);
@@ -202,6 +205,7 @@
     }
     return true;
 }
+#endif  // NN_COMPATIBILITY_LIBRARY_BUILD
 
 }  // namespace unidirectional_sequence_rnn