Add NDK API to allow developers to query the device type

Bug: 111425781
Bug: 112661050
Test: mm
Test: NeuralNetworksTest_static
Change-Id: Iff756fa8d245c2c18d8ea8682ca648640ab45a3c
Merged-In: Iff756fa8d245c2c18d8ea8682ca648640ab45a3c
(cherry picked from commit 7dd333dd0a075899cb0d03fb6b433f8bbd9febd2)
diff --git a/common/include/HalInterfaces.h b/common/include/HalInterfaces.h
index dc5e72e..1c195a3 100644
--- a/common/include/HalInterfaces.h
+++ b/common/include/HalInterfaces.h
@@ -48,6 +48,7 @@
 using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
 using ::android::hardware::neuralnetworks::V1_1::Capabilities;
 using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
+using ::android::hardware::neuralnetworks::V1_2::DeviceType;
 using ::android::hardware::neuralnetworks::V1_2::IDevice;
 using ::android::hardware::neuralnetworks::V1_2::IExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
index 45df862..3d73465 100644
--- a/runtime/Manager.cpp
+++ b/runtime/Manager.cpp
@@ -50,6 +50,7 @@
     const char* getVersionString() const override { return mVersionString.c_str(); }
     VersionedIDevice* getInterface() override { return &mInterface; }
     int64_t getFeatureLevel() override { return mInterface.getFeatureLevel(); }
+    int32_t getType() const override { return mInterface.getType(); }
     void getSupportedOperations(const Model& hidlModel, hidl_vec<bool>* supported) override;
     PerformanceInfo getFloat32Performance() const override { return mFloat32Performance; }
     PerformanceInfo getQuantized8Performance() const override { return mQuantized8Performance; }
@@ -230,6 +231,7 @@
     const char* getVersionString() const override { return kVersionString.c_str(); }
     VersionedIDevice* getInterface() override { return nullptr; }
     int64_t getFeatureLevel() override { return kFeatureLevel; }
+    int32_t getType() const override { return ANEURALNETWORKS_DEVICE_CPU; }
     void getSupportedOperations(const Model& hidlModel, hidl_vec<bool>* supported) override;
     PerformanceInfo getFloat32Performance() const override { return kPerformance; }
     PerformanceInfo getQuantized8Performance() const override { return kPerformance; }
diff --git a/runtime/Manager.h b/runtime/Manager.h
index 2541a97..1bf0462 100644
--- a/runtime/Manager.h
+++ b/runtime/Manager.h
@@ -41,6 +41,7 @@
     virtual const char* getName() const = 0;
     virtual const char* getVersionString() const = 0;
     virtual int64_t getFeatureLevel() = 0;
+    virtual int32_t getType() const = 0;
     virtual void getSupportedOperations(const Model& hidlModel, hidl_vec<bool>* supported) = 0;
     virtual PerformanceInfo getFloat32Performance() const = 0;
     virtual PerformanceInfo getQuantized8Performance() const = 0;
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index 16b05a4..870502e 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -27,8 +27,8 @@
 #include "ExecutionBuilder.h"
 #include "Manager.h"
 #include "Memory.h"
-#include "NeuralNetworksOEM.h"
 #include "ModelBuilder.h"
+#include "NeuralNetworksOEM.h"
 #include "Tracing.h"
 #include "Utils.h"
 
@@ -61,20 +61,17 @@
 // IMPORTANT: When adding new values, update kNumberOfOperationTypes or
 // kNumberOfOperationTypesOEMin Utils.h.
 static_assert(ANEURALNETWORKS_ADD == 0, "ANEURALNETWORKS_ADD has changed");
-static_assert(ANEURALNETWORKS_AVERAGE_POOL_2D == 1,
-              "ANEURALNETWORKS_AVERAGE_POOL_2D has changed");
+static_assert(ANEURALNETWORKS_AVERAGE_POOL_2D == 1, "ANEURALNETWORKS_AVERAGE_POOL_2D has changed");
 static_assert(ANEURALNETWORKS_CONCATENATION == 2, "ANEURALNETWORKS_CONCATENATION has changed");
 static_assert(ANEURALNETWORKS_CONV_2D == 3, "ANEURALNETWORKS_CONV_2D has changed");
 static_assert(ANEURALNETWORKS_DEPTHWISE_CONV_2D == 4,
               "ANEURALNETWORKS_DEPTHWISE_CONV_2D has changed");
-static_assert(ANEURALNETWORKS_DEPTH_TO_SPACE == 5,
-              "ANEURALNETWORKS_DEPTH_TO_SPACE has changed");
+static_assert(ANEURALNETWORKS_DEPTH_TO_SPACE == 5, "ANEURALNETWORKS_DEPTH_TO_SPACE has changed");
 static_assert(ANEURALNETWORKS_DEQUANTIZE == 6, "ANEURALNETWORKS_DEQUANTIZE has changed");
 static_assert(ANEURALNETWORKS_EMBEDDING_LOOKUP == 7,
               "ANEURALNETWORKS_EMBEDDING_LOOKUP has changed");
 static_assert(ANEURALNETWORKS_FLOOR == 8, "ANEURALNETWORKS_FLOOR has changed");
-static_assert(ANEURALNETWORKS_FULLY_CONNECTED == 9,
-              "ANEURALNETWORKS_FULLY_CONNECTED has changed");
+static_assert(ANEURALNETWORKS_FULLY_CONNECTED == 9, "ANEURALNETWORKS_FULLY_CONNECTED has changed");
 static_assert(ANEURALNETWORKS_HASHTABLE_LOOKUP == 10,
               "ANEURALNETWORKS_HASHTABLE_LOOKUP has changed");
 static_assert(ANEURALNETWORKS_L2_NORMALIZATION == 11,
@@ -83,8 +80,7 @@
 static_assert(ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION == 13,
               "ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION has changed");
 static_assert(ANEURALNETWORKS_LOGISTIC == 14, "ANEURALNETWORKS_LOGISTIC has changed");
-static_assert(ANEURALNETWORKS_LSH_PROJECTION == 15,
-              "ANEURALNETWORKS_LSH_PROJECTION has changed");
+static_assert(ANEURALNETWORKS_LSH_PROJECTION == 15, "ANEURALNETWORKS_LSH_PROJECTION has changed");
 static_assert(ANEURALNETWORKS_LSTM == 16, "ANEURALNETWORKS_LSTM has changed");
 static_assert(ANEURALNETWORKS_MAX_POOL_2D == 17, "ANEURALNETWORKS_MAX_POOL has changed");
 static_assert(ANEURALNETWORKS_MUL == 18, "ANEURALNETWORKS_MUL has changed");
@@ -92,27 +88,26 @@
 static_assert(ANEURALNETWORKS_RELU1 == 20, "ANEURALNETWORKS_RELU1 has changed");
 static_assert(ANEURALNETWORKS_RELU6 == 21, "ANEURALNETWORKS_RELU6 has changed");
 static_assert(ANEURALNETWORKS_RESHAPE == 22, "ANEURALNETWORKS_RESHAPE has changed");
-static_assert(ANEURALNETWORKS_RESIZE_BILINEAR == 23,
-              "ANEURALNETWORKS_RESIZE_BILINEAR has changed");
+static_assert(ANEURALNETWORKS_RESIZE_BILINEAR == 23, "ANEURALNETWORKS_RESIZE_BILINEAR has changed");
 static_assert(ANEURALNETWORKS_RNN == 24, "ANEURALNETWORKS_RNN has changed");
 static_assert(ANEURALNETWORKS_SOFTMAX == 25, "ANEURALNETWORKS_SOFTMAX has changed");
-static_assert(ANEURALNETWORKS_SPACE_TO_DEPTH == 26,
-              "ANEURALNETWORKS_SPACE_TO_DEPTH has changed");
+static_assert(ANEURALNETWORKS_SPACE_TO_DEPTH == 26, "ANEURALNETWORKS_SPACE_TO_DEPTH has changed");
 static_assert(ANEURALNETWORKS_SVDF == 27, "ANEURALNETWORKS_SVDF has changed");
 static_assert(ANEURALNETWORKS_TANH == 28, "ANEURALNETWORKS_TANH has changed");
 
-static_assert(ANEURALNETWORKS_BATCH_TO_SPACE_ND == 29, "ANEURALNETWORKS_BATCH_TO_SPACE_ND has changed");
+static_assert(ANEURALNETWORKS_BATCH_TO_SPACE_ND == 29,
+              "ANEURALNETWORKS_BATCH_TO_SPACE_ND has changed");
 static_assert(ANEURALNETWORKS_DIV == 30, "ANEURALNETWORKS_DIV has changed");
 static_assert(ANEURALNETWORKS_MEAN == 31, "ANEURALNETWORKS_MEAN has changed");
 static_assert(ANEURALNETWORKS_PAD == 32, "ANEURALNETWORKS_PAD has changed");
-static_assert(ANEURALNETWORKS_SPACE_TO_BATCH_ND == 33, "ANEURALNETWORKS_SPACE_TO_BATCH_ND has changed");
+static_assert(ANEURALNETWORKS_SPACE_TO_BATCH_ND == 33,
+              "ANEURALNETWORKS_SPACE_TO_BATCH_ND has changed");
 static_assert(ANEURALNETWORKS_SQUEEZE == 34, "ANEURALNETWORKS_SQUEEZE has changed");
 static_assert(ANEURALNETWORKS_STRIDED_SLICE == 35, "ANEURALNETWORKS_STRIDED_SLICE has changed");
 static_assert(ANEURALNETWORKS_SUB == 36, "ANEURALNETWORKS_TANH has changed");
 static_assert(ANEURALNETWORKS_TRANSPOSE == 37, "ANEURALNETWORKS_TRANSPOSE has changed");
 
-static_assert(ANEURALNETWORKS_OEM_OPERATION == 10000,
-              "ANEURALNETWORKS_OEM_OPERATION has changed");
+static_assert(ANEURALNETWORKS_OEM_OPERATION == 10000, "ANEURALNETWORKS_OEM_OPERATION has changed");
 
 static_assert(ANEURALNETWORKS_FUSED_NONE == 0, "ANEURALNETWORKS_FUSED_NONE has changed");
 static_assert(ANEURALNETWORKS_FUSED_RELU == 1, "ANEURALNETWORKS_FUSED_RELU has changed");
@@ -129,8 +124,7 @@
 static_assert(ANEURALNETWORKS_NO_ERROR == 0, "ANEURALNETWORKS_NO_ERROR has changed");
 static_assert(ANEURALNETWORKS_OUT_OF_MEMORY == 1, "ANEURALNETWORKS_OUT_OF_MEMORY has changed");
 static_assert(ANEURALNETWORKS_INCOMPLETE == 2, "ANEURALNETWORKS_INCOMPLETE has changed");
-static_assert(ANEURALNETWORKS_UNEXPECTED_NULL == 3,
-              "ANEURALNETWORKS_UNEXPECTED_NULL has changed");
+static_assert(ANEURALNETWORKS_UNEXPECTED_NULL == 3, "ANEURALNETWORKS_UNEXPECTED_NULL has changed");
 static_assert(ANEURALNETWORKS_BAD_DATA == 4, "ANEURALNETWORKS_BAD_DATA has changed");
 static_assert(ANEURALNETWORKS_OP_FAILED == 5, "ANEURALNETWORKS_OP_FAILED has changed");
 static_assert(ANEURALNETWORKS_BAD_STATE == 6, "ANEURALNETWORKS_BAD_STATE has changed");
@@ -141,6 +135,13 @@
 static_assert(ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES == 128,
               "ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES has changed");
 
+static_assert(ANEURALNETWORKS_DEVICE_UNKNOWN == 0, "ANEURALNETWORKS_DEVICE_UNKNOWN has changed");
+static_assert(ANEURALNETWORKS_DEVICE_OTHER == 1, "ANEURALNETWORKS_DEVICE_OTHER has changed");
+static_assert(ANEURALNETWORKS_DEVICE_CPU == 2, "ANEURALNETWORKS_DEVICE_CPU has changed");
+static_assert(ANEURALNETWORKS_DEVICE_GPU == 3, "ANEURALNETWORKS_DEVICE_GPU has changed");
+static_assert(ANEURALNETWORKS_DEVICE_ACCELERATOR == 4,
+              "ANEURALNETWORKS_DEVICE_ACCELERATOR has changed");
+
 // Make sure that the constants are compatible with the values defined in
 // hardware/interfaces/neuralnetworks/1.0/types.hal.
 static_assert(static_cast<int32_t>(OperandType::OEM) == ANEURALNETWORKS_OEM_SCALAR,
@@ -158,7 +159,7 @@
 static_assert(static_cast<int32_t>(OperandType::TENSOR_FLOAT32) == ANEURALNETWORKS_TENSOR_FLOAT32,
               "TENSOR_FLOAT32 != ANEURALNETWORKS_TENSOR_FLOAT32");
 static_assert(static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) ==
-                          ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
+                      ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
               "TENSOR_QUANT8_ASYMM != ANEURALNETWORKS_TENSOR_QUANT8_ASYMM");
 static_assert(static_cast<int32_t>(OperandType::BOOL) == ANEURALNETWORKS_BOOL,
               "BOOL != ANEURALNETWORKS_BOOL");
@@ -174,42 +175,40 @@
 static_assert(static_cast<int32_t>(OperationType::ADD) == ANEURALNETWORKS_ADD,
               "OperationType::ADD != ANEURALNETWORKS_ADD");
 static_assert(static_cast<int32_t>(OperationType::AVERAGE_POOL_2D) ==
-                          ANEURALNETWORKS_AVERAGE_POOL_2D,
+                      ANEURALNETWORKS_AVERAGE_POOL_2D,
               "OperationType::AVERAGE_POOL_2D != ANEURALNETWORKS_AVERAGE_POOL_2D");
 static_assert(static_cast<int32_t>(OperationType::CONV_2D) == ANEURALNETWORKS_CONV_2D,
               "OperationType::CONV_2D != ANEURALNETWORKS_CONV_2D");
 static_assert(static_cast<int32_t>(OperationType::DEPTHWISE_CONV_2D) ==
-                          ANEURALNETWORKS_DEPTHWISE_CONV_2D,
+                      ANEURALNETWORKS_DEPTHWISE_CONV_2D,
               "OperationType::DEPTHWISE_CONV_2D != ANEURALNETWORKS_DEPTHWISE_CONV_2D");
-static_assert(static_cast<int32_t>(OperationType::DEPTH_TO_SPACE) ==
-                          ANEURALNETWORKS_DEPTH_TO_SPACE,
+static_assert(static_cast<int32_t>(OperationType::DEPTH_TO_SPACE) == ANEURALNETWORKS_DEPTH_TO_SPACE,
               "OperationType::DEPTH_TO_SPACE != ANEURALNETWORKS_DEPTH_TO_SPACE");
 static_assert(static_cast<int32_t>(OperationType::DEQUANTIZE) == ANEURALNETWORKS_DEQUANTIZE,
               "OperationType::DEQUANTIZE != ANEURALNETWORKS_DEQUANTIZE");
 static_assert(static_cast<int32_t>(OperationType::EMBEDDING_LOOKUP) ==
-                          ANEURALNETWORKS_EMBEDDING_LOOKUP,
+                      ANEURALNETWORKS_EMBEDDING_LOOKUP,
               "OperationType::EMBEDDING_LOOKUP != ANEURALNETWORKS_EMBEDDING_LOOKUP");
 static_assert(static_cast<int32_t>(OperationType::FLOOR) == ANEURALNETWORKS_FLOOR,
               "OperationType::FLOOR != ANEURALNETWORKS_FLOOR");
 static_assert(static_cast<int32_t>(OperationType::FULLY_CONNECTED) ==
-                          ANEURALNETWORKS_FULLY_CONNECTED,
+                      ANEURALNETWORKS_FULLY_CONNECTED,
               "OperationType::FULLY_CONNECTED != ANEURALNETWORKS_FULLY_CONNECTED");
 static_assert(static_cast<int32_t>(OperationType::HASHTABLE_LOOKUP) ==
-                          ANEURALNETWORKS_HASHTABLE_LOOKUP,
+                      ANEURALNETWORKS_HASHTABLE_LOOKUP,
               "OperationType::HASHTABLE_LOOKUP != ANEURALNETWORKS_HASHTABLE_LOOKUP");
 static_assert(static_cast<int32_t>(OperationType::L2_NORMALIZATION) ==
-                          ANEURALNETWORKS_L2_NORMALIZATION,
+                      ANEURALNETWORKS_L2_NORMALIZATION,
               "OperationType::L2_NORMALIZATION != ANEURALNETWORKS_L2_NORMALIZATION");
 static_assert(static_cast<int32_t>(OperationType::L2_POOL_2D) == ANEURALNETWORKS_L2_POOL_2D,
               "OperationType::L2_POOL_2D != ANEURALNETWORKS_L2_POOL_2D");
 static_assert(static_cast<int32_t>(OperationType::LOCAL_RESPONSE_NORMALIZATION) ==
-                          ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
+                      ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
               "OperationType::LOCAL_RESPONSE_NORMALIZATION != "
               "ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION");
 static_assert(static_cast<int32_t>(OperationType::LOGISTIC) == ANEURALNETWORKS_LOGISTIC,
               "OperationType::LOGISTIC != ANEURALNETWORKS_LOGISTIC");
-static_assert(static_cast<int32_t>(OperationType::LSH_PROJECTION) ==
-                          ANEURALNETWORKS_LSH_PROJECTION,
+static_assert(static_cast<int32_t>(OperationType::LSH_PROJECTION) == ANEURALNETWORKS_LSH_PROJECTION,
               "OperationType::LSH_PROJECTION != ANEURALNETWORKS_LSH_PROJECTION");
 static_assert(static_cast<int32_t>(OperationType::LSTM) == ANEURALNETWORKS_LSTM,
               "OperationType::LSTM != ANEURALNETWORKS_LSTM");
@@ -226,21 +225,21 @@
 static_assert(static_cast<int32_t>(OperationType::RESHAPE) == ANEURALNETWORKS_RESHAPE,
               "OperationType::RESHAPE != ANEURALNETWORKS_RESHAPE");
 static_assert(static_cast<int32_t>(OperationType::RESIZE_BILINEAR) ==
-                          ANEURALNETWORKS_RESIZE_BILINEAR,
+                      ANEURALNETWORKS_RESIZE_BILINEAR,
               "OperationType::RESIZE_BILINEAR != ANEURALNETWORKS_RESIZE_BILINEAR");
 static_assert(static_cast<int32_t>(OperationType::RNN) == ANEURALNETWORKS_RNN,
               "OperationType::RNN != ANEURALNETWORKS_RNN");
 static_assert(static_cast<int32_t>(OperationType::SOFTMAX) == ANEURALNETWORKS_SOFTMAX,
               "OperationType::SOFTMAX != ANEURALNETWORKS_SOFTMAX");
-static_assert(static_cast<int32_t>(OperationType::SPACE_TO_DEPTH) ==
-                          ANEURALNETWORKS_SPACE_TO_DEPTH,
+static_assert(static_cast<int32_t>(OperationType::SPACE_TO_DEPTH) == ANEURALNETWORKS_SPACE_TO_DEPTH,
               "OperationType::SPACE_TO_DEPTH != ANEURALNETWORKS_SPACE_TO_DEPTH");
 static_assert(static_cast<int32_t>(OperationType::SVDF) == ANEURALNETWORKS_SVDF,
               "OperationType::SVDF != ANEURALNETWORKS_SVDF");
 static_assert(static_cast<int32_t>(OperationType::TANH) == ANEURALNETWORKS_TANH,
               "OperationType::TANH != ANEURALNETWORKS_TANH");
 
-static_assert(static_cast<int32_t>(OperationType::BATCH_TO_SPACE_ND) == ANEURALNETWORKS_BATCH_TO_SPACE_ND,
+static_assert(static_cast<int32_t>(OperationType::BATCH_TO_SPACE_ND) ==
+                      ANEURALNETWORKS_BATCH_TO_SPACE_ND,
               "OperationType::BATCH_TO_SPACE_ND != ANEURALNETWORKS_BATCH_TO_SPACE_ND");
 static_assert(static_cast<int32_t>(OperationType::DIV) == ANEURALNETWORKS_DIV,
               "OperationType::DIV != ANEURALNETWORKS_DIV");
@@ -249,12 +248,11 @@
 static_assert(static_cast<int32_t>(OperationType::PAD) == ANEURALNETWORKS_PAD,
               "OperationType::PAD != ANEURALNETWORKS_PAD");
 static_assert(static_cast<int32_t>(OperationType::SPACE_TO_BATCH_ND) ==
-                          ANEURALNETWORKS_SPACE_TO_BATCH_ND,
+                      ANEURALNETWORKS_SPACE_TO_BATCH_ND,
               "OperationType::SPACE_TO_BATCH_ND != ANEURALNETWORKS_SPACE_TO_BATCH_ND");
 static_assert(static_cast<int32_t>(OperationType::SQUEEZE) == ANEURALNETWORKS_SQUEEZE,
               "OperationType::SQUEEZE != ANEURALNETWORKS_SQUEEZE");
-static_assert(static_cast<int32_t>(OperationType::STRIDED_SLICE) ==
-                          ANEURALNETWORKS_STRIDED_SLICE,
+static_assert(static_cast<int32_t>(OperationType::STRIDED_SLICE) == ANEURALNETWORKS_STRIDED_SLICE,
               "OperationType::STRIDED_SLICE != ANEURALNETWORKS_STRIDED_SLICE");
 static_assert(static_cast<int32_t>(OperationType::SUB) == ANEURALNETWORKS_SUB,
               "OperationType::SUB != ANEURALNETWORKS_SUB");
@@ -270,6 +268,17 @@
 static_assert(static_cast<int32_t>(FusedActivationFunc::RELU6) == ANEURALNETWORKS_FUSED_RELU6,
               "FusedActivationFunc::RELU6 != ANEURALNETWORKS_FUSED_RELU6");
 
+// Make sure that the constants are compatible with the values defined in
+// hardware/interfaces/neuralnetworks/1.2/types.hal.
+static_assert(static_cast<int32_t>(DeviceType::OTHER) == ANEURALNETWORKS_DEVICE_OTHER,
+              "DeviceType::OTHER != ANEURALNETWORKS_DEVICE_OTHER");
+static_assert(static_cast<int32_t>(DeviceType::CPU) == ANEURALNETWORKS_DEVICE_CPU,
+              "DeviceType::CPU != ANEURALNETWORKS_DEVICE_CPU");
+static_assert(static_cast<int32_t>(DeviceType::GPU) == ANEURALNETWORKS_DEVICE_GPU,
+              "DeviceType::GPU != ANEURALNETWORKS_DEVICE_GPU");
+static_assert(static_cast<int32_t>(DeviceType::ACCELERATOR) == ANEURALNETWORKS_DEVICE_ACCELERATOR,
+              "DeviceType::ACCELERATOR != ANEURALNETWORKS_DEVICE_ACCELERATOR");
+
 // Asserts for ANeuralNetworksOperandType memory layout
 static_assert(offsetof(ANeuralNetworksOperandType, type) == 0,
               "ANeuralNetworksOperandType.type offset != 0");
@@ -344,6 +353,20 @@
     return ANEURALNETWORKS_NO_ERROR;
 }
 
+int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type) {
+    if (device == nullptr || type == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksDevice_getType passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const Device* d = reinterpret_cast<const Device*>(device);
+    int32_t dType = d->getType();
+    if (dType < 0) {
+        return ANEURALNETWORKS_OP_FAILED;
+    }
+    *type = dType;
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
 int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
                                           int64_t* featureLevel) {
     if (device == nullptr || featureLevel == nullptr) {
@@ -599,8 +622,7 @@
     return m->identifyInputsAndOutputs(inputCount, inputs, outputCount, outputs);
 }
 
-int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model,
-                                                          bool allow) {
+int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow) {
     NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_relaxComputationFloat32toFloat16");
     if (!model) {
         LOG(ERROR) << ("ANeuralNetworksModel_relaxComputationFloat32toFloat16 passed a nullptr");
diff --git a/runtime/VersionedInterfaces.cpp b/runtime/VersionedInterfaces.cpp
index e669540..eca3182 100644
--- a/runtime/VersionedInterfaces.cpp
+++ b/runtime/VersionedInterfaces.cpp
@@ -264,6 +264,24 @@
     }
 }
 
+int32_t VersionedIDevice::getType() const {
+    std::pair<ErrorStatus, DeviceType> result;
+    if (mDeviceV1_2 != nullptr) {
+        Return<void> ret =
+                mDeviceV1_2->getType([&result](ErrorStatus error, DeviceType deviceType) {
+                    result = std::make_pair(error, deviceType);
+                });
+        if (!ret.isOk()) {
+            LOG(ERROR) << "getType failure: " << ret.description();
+            return -1;
+        }
+        return static_cast<int32_t>(result.second);
+    } else {
+        LOG(INFO) << "Unknown NNAPI device type.";
+        return ANEURALNETWORKS_DEVICE_UNKNOWN;
+    }
+}
+
 std::pair<ErrorStatus, hidl_string> VersionedIDevice::getVersionString() {
     std::pair<ErrorStatus, hidl_string> result;
 
diff --git a/runtime/VersionedInterfaces.h b/runtime/VersionedInterfaces.h
index 1c4311f..4672b47 100644
--- a/runtime/VersionedInterfaces.h
+++ b/runtime/VersionedInterfaces.h
@@ -167,10 +167,26 @@
      * @return featureLevel The API level of the most advanced feature this driver implements.
      *                      For example, if the driver implements the features introduced in
      *                      Android P, the value would be 28.
+     *                      Return -1 if the driver is offline or busy, or the query resulted in
+     *                      an unspecified error.
      */
     int64_t getFeatureLevel();
 
     /**
+     * Returns the device type of a driver.
+     *
+     * @return deviceType The type of a given device, which can help application developers
+     *                    to distribute Machine Learning workloads and other workloads
+     *                    such as graphical rendering. E.g., for an app which renders AR scenes
+     *                    based on real time object detection results, the developer could choose
+     *                    an ACCELERATOR type device for ML workloads, and reserve GPU for
+     *                    graphical rendering.
+     *                    Return -1 if the driver is offline or busy, or the query resulted in
+     *                    an unspecified error.
+     */
+    int32_t getType() const;
+
+    /**
      * Get the version string of the driver implementation.
      *
      * The version string must be a unique token among the set of version strings of
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 3dc4e96..23a2c79 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -3915,6 +3915,25 @@
 } PreferenceCode;
 
 /**
+ * Device types.
+ *
+ * The type of NNAPI device.
+ */
+typedef enum {
+    /** The device type cannot be provided. */
+    ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
+    /** The device does not fall into any category below. */
+    ANEURALNETWORKS_DEVICE_OTHER = 1,
+    /** The device runs NNAPI models on single or multi-core CPU. */
+    ANEURALNETWORKS_DEVICE_CPU = 2,
+    /** The device can run NNAPI models and also accelerate graphics APIs such
+     * as OpenGL ES and Vulkan. */
+    ANEURALNETWORKS_DEVICE_GPU = 3,
+    /** Dedicated accelerator for Machine Learning workloads. */
+    ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
+} DeviceTypeCode;
+
+/**
  * Result codes.
  *
  * <p>Any NNAPI function can return any result code, including result codes not
@@ -4217,19 +4236,19 @@
 #if __ANDROID_API__ >= __ANDROID_API_Q__
 
 /**
- * ANeuralNetworksDevice is an opaque type that represents an accelerator.
+ * ANeuralNetworksDevice is an opaque type that represents a device.
  *
  * This type is used to query basic properties and supported operations of the corresponding
- * accelerator, and control which accelerator(s) a model is to be run on.
+ * device, and control which device(s) a model is to be run on.
  *
  * Available since API level 29.
  */
 typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
 
 /**
- * Get the number of available accelerators.
+ * Get the number of available devices.
  *
- * @param numDevices Used to return the number of accelerators.
+ * @param numDevices Used to return the number of devices.
  *
  * @return ANEURALNETWORKS_NO_ERROR if successful.
  *
@@ -4238,13 +4257,13 @@
 int ANeuralNetworks_getDeviceCount(uint32_t* numDevices);
 
 /**
- * Get the representation of the specified accelerator.
+ * Get the representation of the specified device.
  *
- * @param devIndex The index of the specified accelerator. Must be less than the
-                   number of available accelerators.
- * @param device The representation of the specified accelerator.
+ * @param devIndex The index of the specified device. Must be less than the
+                   number of available devices.
+ * @param device The representation of the specified device.
  *               The same representation will always be returned for the specified
- *               accelerator.
+ *               device.
  *
  * @return ANEURALNETWORKS_NO_ERROR if successful.
  *
@@ -4253,10 +4272,10 @@
 int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device);
 
 /**
- * Get the name of the specified accelerator.
+ * Get the name of the specified device.
  *
- * @param device The representation of the specified accelerator.
- * @param name   The returned name of the specified accelerator. The name will be in UTF-8
+ * @param device The representation of the specified device.
+ * @param name   The returned name of the specified device. The name will be in UTF-8
  *               and will be null-terminated. It will be recognizable as a known device name
  *               rather than a cryptic string. For devices with feature level 29 and above, the
  *               format of the name is {VENDOR}-{DEVICE}, e.g. “google-ipu”. For devices with
@@ -4270,7 +4289,25 @@
 int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name);
 
 /**
- * Get the version of the driver implementation of the specified accelerator.
+ * Get the type of a given device.
+ *
+ * The device type can be used to help application developers to distribute Machine Learning
+ * workloads and other workloads such as graphical rendering.
+ * E.g., for an app which renders AR scenes based on real time object detection results,
+ * the developer could choose an ACCELERATOR type device for ML workloads, and reserve GPU
+ * for graphical rendering.
+ *
+ * @param device The representation of the specified device.
+ * @param type The returned {@link DeviceTypeCode} of the specified device.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *
+ * Available since API level 29.
+ */
+int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type);
+
+/**
+ * Get the version of the driver implementation of the specified device.
  *
  * It’s the responsibility of the driver implementor to insure that this version string
  * uniquely distinguishes this implementation from all previous implementations.
@@ -4286,8 +4323,8 @@
  *     - A specific version of the driver has a bug or returns results that don’t match
  *       the minimum precision requirement for the application.
  *
- * @param device The representation of the specified accelerator.
- * @param version The returned version string of the driver for the specified accelerator. The
+ * @param device The representation of the specified device.
+ * @param version The returned version string of the driver for the specified device. The
  *                string will be in UTF-8 and will be null-terminated. For devices with feature
  *                level 28 or lower, "UNKOWN" will be returned. The version string will remain
  *                valid for the duration of the application.
@@ -4299,15 +4336,15 @@
 int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version);
 
 /**
- * Get the supported NNAPI version of the specified accelerator.
+ * Get the supported NNAPI version of the specified device.
  *
- * Each accelerator has a supported feature level, which is the most advanced feature this driver
+ * Each device has a supported feature level, which is the most advanced feature this driver
  * implements. For example, if the driver implements the features introduced in Android P,
  * but does not implement the features introduced after Android P, the value would be 28.
- * Developers could decide whether or not the specified accelerator should be used for a Model that
+ * Developers could decide whether or not the specified device should be used for a Model that
  * has certain feature requirements.
  *
- * @param device The representation of the specified accelerator.
+ * @param device The representation of the specified device.
  * @param featureLevel The API level of the most advanced feature this driver implements.
  *
  * @return ANEURALNETWORKS_NO_ERROR if successful.
@@ -4318,13 +4355,13 @@
                                           int64_t* featureLevel);
 
 /**
- * Get the supported operations for a specified set of accelerators. If multiple devices
+ * Get the supported operations for a specified set of devices. If multiple devices
  * are selected, the supported operation list is a union of supported operations of all
  * selected devices.
  *
  * @param model The model to be queried.
- * @param devices The set of accelerators. Must not contain duplicates.
- * @param numDevices The number of accelerators in the set.
+ * @param devices The set of devices. Must not contain duplicates.
+ * @param numDevices The number of devices in the set.
  * @param supportedOps The boolean array to be filled. True means supported. The size of the
  *                     boolean array must be at least as large as the number of operations
  *                     in the model. The order of elements in the supportedOps array matches
@@ -4340,15 +4377,15 @@
 
 /**
  * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set
- * of accelerators. If more than one accelerator is specified, the compilation will
- * distribute the workload automatically across the accelerators. The model must be fully
- * supported by the specified set of accelerators. This means that
+ * of devices. If more than one device is specified, the compilation will
+ * distribute the workload automatically across the devices. The model must be fully
+ * supported by the specified set of devices. This means that
  * ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every
  * operation for that model/devices pair.
  *
  * @param model The {@link ANeuralNetworksModel} to be compiled.
- * @param devices The set of accelerators. Must not contain duplicates.
- * @param numDevices The number of accelerators in the set.
+ * @param devices The set of devices. Must not contain duplicates.
+ * @param numDevices The number of devices in the set.
  * @param compilation The newly created object or NULL if unsuccessful.
  *
  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
diff --git a/runtime/libneuralnetworks.map.txt b/runtime/libneuralnetworks.map.txt
index bbe6c65..e6af99a 100644
--- a/runtime/libneuralnetworks.map.txt
+++ b/runtime/libneuralnetworks.map.txt
@@ -23,6 +23,7 @@
     ANeuralNetworks_getDeviceCount; # introduced=Q
     ANeuralNetworks_getDevice; # introduced=Q
     ANeuralNetworksDevice_getName; # introduced=Q
+    ANeuralNetworksDevice_getType; # introduced=Q
     ANeuralNetworksDevice_getVersion; # introduced=Q
     ANeuralNetworksDevice_getFeatureLevel; # introduced=Q
     ANeuralNetworksMemory_createFromFd;
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp
index 5273725..e87f067 100644
--- a/runtime/test/TestValidation.cpp
+++ b/runtime/test/TestValidation.cpp
@@ -19,20 +19,19 @@
 
 #include <android/sharedmem.h>
 #include <gtest/gtest.h>
-#include <string>
 #include <sys/mman.h>
-
+#include <string>
 
 // This file tests all the validations done by the Neural Networks API.
 
 namespace {
 class ValidationTest : public ::testing::Test {
-protected:
+   protected:
     virtual void SetUp() {}
 };
 
 class ValidationTestModel : public ValidationTest {
-protected:
+   protected:
     virtual void SetUp() {
         ValidationTest::SetUp();
         ASSERT_EQ(ANeuralNetworksModel_create(&mModel), ANEURALNETWORKS_NO_ERROR);
@@ -47,9 +46,8 @@
         ANeuralNetworksOperandType tensorType{.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                               .dimensionCount = 1,
                                               .dimensions = dimensions};
-        ANeuralNetworksOperandType scalarType{.type = ANEURALNETWORKS_INT32,
-                                              .dimensionCount = 0,
-                                              .dimensions = nullptr};
+        ANeuralNetworksOperandType scalarType{
+                .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};
 
         ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
         ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
@@ -78,9 +76,8 @@
         ANeuralNetworksOperandType tensorType{.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                               .dimensionCount = 1,
                                               .dimensions = dimensions};
-        ANeuralNetworksOperandType scalarType{.type = ANEURALNETWORKS_INT32,
-                                              .dimensionCount = 0,
-                                              .dimensions = nullptr};
+        ANeuralNetworksOperandType scalarType{
+                .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};
         ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
         ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
         ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &scalarType), ANEURALNETWORKS_NO_ERROR);
@@ -112,7 +109,7 @@
 };
 
 class ValidationTestExecution : public ValidationTestCompilation {
-protected:
+   protected:
     virtual void SetUp() {
         ValidationTestCompilation::SetUp();
 
@@ -134,47 +131,46 @@
 
 TEST_F(ValidationTestModel, AddOperand) {
     ANeuralNetworksOperandType floatType{
-                .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
+            .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
     EXPECT_EQ(ANeuralNetworksModel_addOperand(nullptr, &floatType),
               ANEURALNETWORKS_UNEXPECTED_NULL);
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
 
     ANeuralNetworksOperandType quant8TypeInvalidScale{
-                .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
-                .dimensionCount = 0,
-                .dimensions = nullptr,
-                // Scale has to be non-negative
-                .scale = -1.0f,
-                .zeroPoint = 0,
-              };
+            .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
+            .dimensionCount = 0,
+            .dimensions = nullptr,
+            // Scale has to be non-negative
+            .scale = -1.0f,
+            .zeroPoint = 0,
+    };
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &quant8TypeInvalidScale),
               ANEURALNETWORKS_BAD_DATA);
 
     ANeuralNetworksOperandType quant8TypeInvalidZeroPoint{
-                .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
-                .dimensionCount = 0,
-                .dimensions = nullptr,
-                .scale = 1.0f,
-                // zeroPoint has to be in [0, 255]
-                .zeroPoint = -1,
-              };
+            .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
+            .dimensionCount = 0,
+            .dimensions = nullptr,
+            .scale = 1.0f,
+            // zeroPoint has to be in [0, 255]
+            .zeroPoint = -1,
+    };
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &quant8TypeInvalidZeroPoint),
               ANEURALNETWORKS_BAD_DATA);
 
     uint32_t dim = 2;
     ANeuralNetworksOperandType invalidScalarType{
-                .type = ANEURALNETWORKS_INT32,
-                // scalar types can only 0 dimensions.
-                .dimensionCount = 1,
-                .dimensions = &dim,
-              };
+            .type = ANEURALNETWORKS_INT32,
+            // scalar types can only have 0 dimensions.
+            .dimensionCount = 1,
+            .dimensions = &dim,
+    };
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &invalidScalarType),
               ANEURALNETWORKS_BAD_DATA);
 
     ANeuralNetworksModel_finish(mModel);
     // This should fail, as the model is already finished.
-    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType),
-              ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_BAD_STATE);
 }
 
 TEST_F(ValidationTestModel, SetOperandSymmPerChannelQuantParams) {
@@ -207,7 +203,7 @@
 
 TEST_F(ValidationTestModel, SetOptionalOperand) {
     ANeuralNetworksOperandType floatType{
-                .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
+            .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);
 
     EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, nullptr, 0),
@@ -216,7 +212,7 @@
 
 TEST_F(ValidationTestModel, SetOperandValue) {
     ANeuralNetworksOperandType floatType{
-                .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
+            .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);
 
     char buffer[20];
@@ -246,9 +242,7 @@
 TEST_F(ValidationTestModel, SetOperandValueFromMemory) {
     uint32_t dimensions[]{1};
     ANeuralNetworksOperandType floatType{
-                .type = ANEURALNETWORKS_TENSOR_FLOAT32,
-                .dimensionCount = 1,
-                .dimensions = dimensions};
+            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);
 
     const size_t memorySize = 20;
@@ -256,56 +250,46 @@
     ASSERT_GT(memoryFd, 0);
 
     ANeuralNetworksMemory* memory;
-    EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE,
-                                                 memoryFd, 0, &memory),
+    EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
+                                                 &memory),
               ANEURALNETWORKS_NO_ERROR);
 
-    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(nullptr, 0,
-                                                             memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(nullptr, 0, memory, 0, sizeof(float)),
               ANEURALNETWORKS_UNEXPECTED_NULL);
-    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0,
-                                                             nullptr, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, nullptr, 0, sizeof(float)),
               ANEURALNETWORKS_UNEXPECTED_NULL);
 
     // This should fail, since the operand does not exist.
-    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, -1,
-                                                             memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, -1, memory, 0, sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since memory is not the size of a float32.
-    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0,
-                                                             memory, 0, memorySize),
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, memorySize),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, as this operand does not exist.
-    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 1,
-                                                             memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 1, memory, 0, sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since offset is larger than memorySize.
-    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0,
-                                                             memory, memorySize + 1,
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize + 1,
                                                              sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since requested size is larger than the memory.
-    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0,
-                                                             memory, memorySize - 3,
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize - 3,
                                                              sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     ANeuralNetworksModel_finish(mModel);
     // This should fail, as the model is already finished.
-    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0,
-                                                             memory, 0,
-                                                             sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, sizeof(float)),
               ANEURALNETWORKS_BAD_STATE);
 }
 
-
 TEST_F(ValidationTestModel, AddOEMOperand) {
     ANeuralNetworksOperandType OEMScalarType{
-                .type = ANEURALNETWORKS_OEM_SCALAR, .dimensionCount = 0, .dimensions = nullptr};
+            .type = ANEURALNETWORKS_OEM_SCALAR, .dimensionCount = 0, .dimensions = nullptr};
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &OEMScalarType), ANEURALNETWORKS_NO_ERROR);
     char buffer[20];
     EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer, sizeof(buffer)),
@@ -314,9 +298,7 @@
     const size_t kByteSizeOfOEMTensor = 4;
     uint32_t dimensions[]{kByteSizeOfOEMTensor};
     ANeuralNetworksOperandType OEMTensorType{
-                .type = ANEURALNETWORKS_TENSOR_OEM_BYTE,
-                .dimensionCount = 1,
-                .dimensions = dimensions};
+            .type = ANEURALNETWORKS_TENSOR_OEM_BYTE, .dimensionCount = 1, .dimensions = dimensions};
     EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &OEMTensorType), ANEURALNETWORKS_NO_ERROR);
     EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 1, buffer, kByteSizeOfOEMTensor),
               ANEURALNETWORKS_NO_ERROR);
@@ -340,8 +322,7 @@
               ANEURALNETWORKS_UNEXPECTED_NULL);
 
     ANeuralNetworksOperationType invalidOp = -1;
-    EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, invalidOp, 1, &input,
-                                                1, &output),
+    EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, invalidOp, 1, &input, 1, &output),
               ANEURALNETWORKS_BAD_DATA);
 
     ANeuralNetworksModel_finish(mModel);
@@ -538,8 +519,7 @@
               ANEURALNETWORKS_UNEXPECTED_NULL);
     EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, nullptr),
               ANEURALNETWORKS_UNEXPECTED_NULL);
-    EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
-              ANEURALNETWORKS_BAD_STATE);
+    EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_BAD_STATE);
 }
 
 // Also see TEST_F(ValidationTestCompilationForDevices, Finish)
@@ -607,40 +587,40 @@
     ASSERT_GT(memoryFd, 0);
 
     ANeuralNetworksMemory* memory;
-    EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE,
-                                                 memoryFd, 0, &memory),
+    EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
+                                                 &memory),
               ANEURALNETWORKS_NO_ERROR);
 
-    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(nullptr, 0, nullptr,
-                                                          memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(nullptr, 0, nullptr, memory, 0,
+                                                          sizeof(float)),
               ANEURALNETWORKS_UNEXPECTED_NULL);
-    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr,
-                                                          nullptr, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, nullptr, 0,
+                                                          sizeof(float)),
               ANEURALNETWORKS_UNEXPECTED_NULL);
 
     // This should fail, since the operand does not exist.
-    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 999, nullptr,
-                                                          memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 999, nullptr, memory, 0,
+                                                          sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since the operand does not exist.
-    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, -1, nullptr,
-                                                          memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, -1, nullptr, memory, 0,
+                                                          sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since memory is not the size of a float32.
-    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr,
-                                                          memory, 0, memorySize),
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, 0,
+                                                          memorySize),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since offset is larger than memorySize.
-    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr,
-                                                          memory, memorySize + 1, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory,
+                                                          memorySize + 1, sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since requested size is larger than the memory.
-    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr,
-                                                          memory, memorySize - 3, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory,
+                                                          memorySize - 3, sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 }
 
@@ -653,40 +633,40 @@
     ASSERT_GT(memoryFd, 0);
 
     ANeuralNetworksMemory* memory;
-    EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE,
-                                                 memoryFd, 0, &memory),
+    EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
+                                                 &memory),
               ANEURALNETWORKS_NO_ERROR);
 
-    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(nullptr, 0, nullptr,
-                                                           memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(nullptr, 0, nullptr, memory, 0,
+                                                           sizeof(float)),
               ANEURALNETWORKS_UNEXPECTED_NULL);
-    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr,
-                                                           nullptr, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, nullptr, 0,
+                                                           sizeof(float)),
               ANEURALNETWORKS_UNEXPECTED_NULL);
 
     // This should fail, since the operand does not exist.
-    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 999, nullptr,
-                                                           memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 999, nullptr, memory, 0,
+                                                           sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since the operand does not exist.
-    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, -1, nullptr,
-                                                           memory, 0, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, -1, nullptr, memory, 0,
+                                                           sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since memory is not the size of a float32.
-    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr,
-                                                           memory, 0, memorySize),
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0,
+                                                           memorySize),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since offset is larger than memorySize.
-    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr,
-                                                           memory, memorySize + 1, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory,
+                                                           memorySize + 1, sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 
     // This should fail, since requested size is larger than the memory.
-    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr,
-                                                           memory, memorySize - 3, sizeof(float)),
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory,
+                                                           memorySize - 3, sizeof(float)),
               ANEURALNETWORKS_BAD_DATA);
 }
 
@@ -729,8 +709,7 @@
     EXPECT_EQ(ANeuralNetworks_getDevice(numDevices, &device), ANEURALNETWORKS_BAD_DATA);
 }
 
-static void deviceStringCheck(
-        std::function<int(const ANeuralNetworksDevice*, const char**)> func) {
+static void deviceStringCheck(std::function<int(const ANeuralNetworksDevice*, const char**)> func) {
     uint32_t numDevices = 0;
     EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
 
@@ -774,6 +753,29 @@
               ANEURALNETWORKS_UNEXPECTED_NULL);
 }
 
+TEST(ValidationTestIntrospection, DeviceGetType) {
+    uint32_t numDevices = 0;
+    EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+
+    int32_t validTypes[] = {ANEURALNETWORKS_DEVICE_UNKNOWN, ANEURALNETWORKS_DEVICE_OTHER,
+                            ANEURALNETWORKS_DEVICE_CPU, ANEURALNETWORKS_DEVICE_GPU,
+                            ANEURALNETWORKS_DEVICE_ACCELERATOR};
+    int32_t deviceType;
+    for (uint32_t i = 0; i < numDevices; i++) {
+        SCOPED_TRACE(i);
+        // Initialize the deviceType to be an invalid type.
+        deviceType = -1;
+        ANeuralNetworksDevice* device;
+        EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
+        EXPECT_EQ(ANeuralNetworksDevice_getType(device, &deviceType), ANEURALNETWORKS_NO_ERROR);
+        EXPECT_TRUE(std::find(std::begin(validTypes), std::end(validTypes), deviceType) !=
+                    std::end(validTypes));
+        EXPECT_EQ(ANeuralNetworksDevice_getType(device, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+    }
+    EXPECT_EQ(ANeuralNetworksDevice_getType(nullptr, &deviceType), ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksDevice_getType(nullptr, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
 class ValidationTestCompilationForDevices : public ValidationTestModel {
    protected:
     virtual void SetUp() override {