Implement the following introspection APIs:
- ANeuralNetworks_getDeviceCount
- ANeuralNetworks_getDevice
- ANeuralNetworksDevice_getName
- ANeuralNetworksDevice_getVersion
- ANeuralNetworksDevice_getFeatureLevel
The above APIs are added to the NDK, along with the corresponding NNAPI
runtime changes. Validation tests are added for each of the APIs listed above.
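For reference, a minimal usage sketch of the new APIs from application code
(illustrative only, not part of this change; error handling omitted, and the
example values in the comment are hypothetical):

    uint32_t numDevices = 0;
    ANeuralNetworks_getDeviceCount(&numDevices);
    for (uint32_t i = 0; i < numDevices; i++) {
        ANeuralNetworksDevice* device = nullptr;
        ANeuralNetworks_getDevice(i, &device);
        const char* name = nullptr;
        const char* version = nullptr;
        int64_t featureLevel = 0;
        ANeuralNetworksDevice_getName(device, &name);
        ANeuralNetworksDevice_getVersion(device, &version);
        ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel);
        // e.g. name = "google-ipu", version = "1.2.0", featureLevel = 29
    }
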
Bug: 112661538
Bug: 112661284
Bug: 111425781
Test: mm
Test: NeuralNetworksTest_static
Change-Id: If58fe70d3f9a79c2588c80f4709447bcacd97e09
Merged-In: If58fe70d3f9a79c2588c80f4709447bcacd97e09
(cherry picked from commit d130fb58a246d1cbcbbbe811173f057d567f7b19)
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
index 33b2429..25172b9 100644
--- a/runtime/Manager.cpp
+++ b/runtime/Manager.cpp
@@ -58,6 +58,13 @@
mRelaxedFloat32toFloat16Performance = capabilities.relaxedFloat32toFloat16Performance;
}
+ auto result = mInterface.getVersionString();
+ // TODO(miaowang): add a validation test case for the error path.
+ if (result.first != ErrorStatus::NONE) {
+ LOG(ERROR) << "IDevice::getVersionString returned the error " << toString(result.first);
+ } else {
+ mVersionString = result.second;
+ }
return status == ErrorStatus::NONE;
}
diff --git a/runtime/Manager.h b/runtime/Manager.h
index d5d05c9..e086e45 100644
--- a/runtime/Manager.h
+++ b/runtime/Manager.h
@@ -36,7 +36,9 @@
public:
Device(std::string name, const sp<V1_0::IDevice>& device);
VersionedIDevice* getInterface() { return &mInterface; }
- const std::string& getName() const { return mName; }
+ const char* getName() const { return mName.c_str(); }
+ const char* getVersionString() const { return mVersionString.c_str(); }
+
// Returns true if successfully initialized.
bool initialize();
@@ -50,6 +52,7 @@
private:
std::string mName;
+ std::string mVersionString;
VersionedIDevice mInterface;
PerformanceInfo mFloat32Performance;
PerformanceInfo mQuantized8Performance;
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index bab9feb..66d7241 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -260,6 +260,64 @@
using android::sp;
using namespace android::nn;
+int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) {
+ if (numDevices == nullptr) {
+ LOG(ERROR) << "ANeuralNetworks_getDeviceCount passed a nullptr";
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ *numDevices = DeviceManager::get()->getDrivers().size();
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device) {
+ if (device == nullptr) {
+ LOG(ERROR) << "ANeuralNetworks_getDevice passed a nullptr";
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ const std::vector<std::shared_ptr<Device>>& devices = DeviceManager::get()->getDrivers();
+ if (devIndex >= devices.size()) {
+ LOG(ERROR) << "ANeuralNetworks_getDevice passed an invalid device index";
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ *device = reinterpret_cast<ANeuralNetworksDevice*>(devices.at(devIndex).get());
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name) {
+ if (device == nullptr || name == nullptr) {
+ LOG(ERROR) << "ANeuralNetworksDevice_getName passed a nullptr";
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ const Device* d = reinterpret_cast<const Device*>(device);
+ *name = d->getName();
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version) {
+ if (device == nullptr || version == nullptr) {
+ LOG(ERROR) << "ANeuralNetworksDevice_getVersion passed a nullptr";
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ const Device* d = reinterpret_cast<const Device*>(device);
+ *version = d->getVersionString();
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
+ int64_t* featureLevel) {
+ if (device == nullptr || featureLevel == nullptr) {
+ LOG(ERROR) << "ANeuralNetworksDevice_getFeatureLevel passed a nullptr";
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ Device* d = reinterpret_cast<Device*>(const_cast<ANeuralNetworksDevice*>(device));
+ int64_t dFeatureLevel = d->getInterface()->getFeatureLevel();
+ if (dFeatureLevel < 0) {
+ return ANEURALNETWORKS_BAD_STATE;
+ }
+ *featureLevel = dFeatureLevel;
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
int ANeuralNetworksMemory_createFromFd(size_t size, int prot, int fd, size_t offset,
ANeuralNetworksMemory** memory) {
NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksMemory_createFromFd");
diff --git a/runtime/VersionedIDevice.cpp b/runtime/VersionedIDevice.cpp
index 9b90766..4d47f13 100644
--- a/runtime/VersionedIDevice.cpp
+++ b/runtime/VersionedIDevice.cpp
@@ -192,6 +192,40 @@
return static_cast<DeviceStatus>(ret);
}
+int64_t VersionedIDevice::getFeatureLevel() {
+ if (mDeviceV1_2 != nullptr) {
+ return __ANDROID_API_Q__;
+ } else if (mDeviceV1_1 != nullptr) {
+ return __ANDROID_API_P__;
+ } else if (mDeviceV1_0 != nullptr) {
+ return __ANDROID_API_O_MR1__;
+ } else {
+ LOG(ERROR) << "Device not available!";
+ return -1;
+ }
+}
+
+std::pair<ErrorStatus, hidl_string> VersionedIDevice::getVersionString() {
+ std::pair<ErrorStatus, hidl_string> result;
+
+ if (mDeviceV1_2 != nullptr) {
+ Return<void> ret = mDeviceV1_2->getVersionString(
+ [&result](ErrorStatus error, const hidl_string& version) {
+ result = std::make_pair(error, version);
+ });
+ if (!ret.isOk()) {
+ LOG(ERROR) << "getVersionString failure: " << ret.description();
+ return {ErrorStatus::GENERAL_FAILURE, ""};
+ }
+ return result;
+ } else if (mDeviceV1_1 != nullptr || mDeviceV1_0 != nullptr) {
+ return {ErrorStatus::NONE, "UNKNOWN"};
+ } else {
+ LOG(ERROR) << "Could not handle getVersionString";
+ return {ErrorStatus::GENERAL_FAILURE, ""};
+ }
+}
+
bool VersionedIDevice::operator==(nullptr_t) {
return mDeviceV1_0 == nullptr;
}
diff --git a/runtime/VersionedIDevice.h b/runtime/VersionedIDevice.h
index 8b3fe95..d190063 100644
--- a/runtime/VersionedIDevice.h
+++ b/runtime/VersionedIDevice.h
@@ -157,6 +157,45 @@
DeviceStatus getStatus();
/**
+ * Returns the feature level of a driver.
+ *
+ * @return featureLevel The API level of the most advanced feature this driver implements.
+ * For example, if the driver implements the features introduced in
+ * Android P, the value would be 28.
+ */
+ int64_t getFeatureLevel();
+
+ /**
+ * Get the version string of the driver implementation.
+ *
+ * The version string must be a unique token among the set of version strings of
+ * drivers of a specific device. The token identifies the device driver's
+ * implementation. The token must not be confused with the feature level which is solely
+ * defined by the interface version. This API is opaque to the Android framework, but the
+ * Android framework may use the information for debugging or to pass on to NNAPI applications.
+ *
+ * Application developers sometimes have specific requirements to ensure good user experiences,
+ * and they need more information to make intelligent decisions when the Android framework
+ * cannot. For example, combined with the device name and other information, the token can help
+ * NNAPI applications filter devices based on their needs:
+ * - An application demands a certain level of performance, but a specific version of
+ * the driver cannot meet that requirement because of a performance regression.
+ * The application can blacklist the driver based on the version provided.
+ * - An application has a minimum precision requirement, but certain versions of
+ * the driver cannot meet that requirement because of bugs or certain optimizations.
+ * The application can filter out versions of these drivers.
+ *
+ * @return status Error status returned from querying the version string. Must be:
+ * - NONE if the query was successful
+ * - DEVICE_UNAVAILABLE if the driver is offline or busy
+ * - GENERAL_FAILURE if the query resulted in an
+ * unspecified error
+ * @return version The version string of the device implementation.
+ * Must have nonzero length if the query is successful, and must be an empty string if not.
+ */
+ std::pair<ErrorStatus, hidl_string> getVersionString();
+
+ /**
* Returns whether this handle to an IDevice object is valid or not.
*
* @return bool true if V1_0::IDevice (which could be V1_1::IDevice) is
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 8f05ff6..285ba7a 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -2788,6 +2788,111 @@
*/
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;
+#if __ANDROID_API__ >= __ANDROID_API_Q__
+
+/**
+ * ANeuralNetworksDevice is an opaque type that represents an accelerator.
+ *
+ * This type is used to query basic properties and supported operations of a specified
+ * accelerator, and to control which accelerator(s) a model is to be run on.
+ *
+ * Available since API level 29.
+ */
+typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
+
+/**
+ * Get the number of available accelerators.
+ *
+ * @param numDevices Used to return the number of accelerators.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *
+ * Available since API level 29.
+ */
+int ANeuralNetworks_getDeviceCount(uint32_t* numDevices);
+
+/**
+ * Get the representation of the specified accelerator.
+ *
+ * @param devIndex The index of the specified accelerator. Must be less than the
+ * number of available accelerators.
+ * @param device The representation of the specified accelerator.
+ * The same representation will always be returned for the specified
+ * accelerator.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *
+ * Available since API level 29.
+ */
+int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device);
+
+/**
+ * Get the name of the specified accelerator.
+ *
+ * @param device The representation of the specified accelerator.
+ * @param name The returned name of the specified accelerator. The name will be in UTF-8
+ * and will be null-terminated. It will be recognizable as a known device name
+ * rather than a cryptic string. For devices with feature level 29 and above, the
+ * format of the name is {VENDOR}-{DEVICE}, e.g. “google-ipu”. For devices with
+ * feature level 28 or lower, the name will always be “unknown-device”.
+ * The name will remain valid for the duration of the application.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *
+ * Available since API level 29.
+ */
+int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name);
+
+/**
+ * Get the version of the driver implementation of the specified accelerator.
+ *
+ * It’s the responsibility of the driver implementor to ensure that this version string
+ * uniquely distinguishes this implementation from all previous implementations.
+ *
+ * This version string must not be confused with the feature level which is solely defined
+ * by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions.
+ * For example, it is not possible to filter all drivers older than a certain version.
+ *
+ * Application developers may use this version string to avoid or prefer specific driver
+ * implementations. For example, an application may want to do so because:
+ * - A specific version of the driver does not provide the required performance,
+ * perhaps because of a performance regression.
+ * - A specific version of the driver has a bug or returns results that don’t match
+ * the minimum precision requirement for the application.
+ *
+ * @param device The representation of the specified accelerator.
+ * @param version The returned version string of the driver for the specified accelerator. The
+ * string will be in UTF-8 and will be null-terminated. For devices with feature
+ * level 28 or lower, "UNKNOWN" will be returned. The version string will remain
+ * valid for the duration of the application.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *
+ * Available since API level 29.
+ */
+int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version);
+
+/**
+ * Get the supported NNAPI feature level of the specified accelerator.
+ *
+ * Each accelerator has a supported feature level, which is the most advanced feature this driver
+ * implements. For example, if the driver implements the features introduced in Android P,
+ * but does not implement the features introduced after Android P, the value would be 28.
+ * Developers could decide whether or not the specified accelerator should be used for a Model that
+ * has certain feature requirements.
+ *
+ * @param device The representation of the specified accelerator.
+ * @param featureLevel The API level of the most advanced feature this driver implements.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *
+ * Available since API level 29.
+ */
+int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
+ int64_t* featureLevel);
+
+#endif // __ANDROID_API__ >= __ANDROID_API_Q__
+
#if __ANDROID_API__ >= 27
/**
diff --git a/runtime/libneuralnetworks.map.txt b/runtime/libneuralnetworks.map.txt
index f558e93..e4c457c 100644
--- a/runtime/libneuralnetworks.map.txt
+++ b/runtime/libneuralnetworks.map.txt
@@ -20,6 +20,11 @@
LIBNEURALNETWORKS {
global:
+ ANeuralNetworks_getDeviceCount; # introduced=Q
+ ANeuralNetworks_getDevice; # introduced=Q
+ ANeuralNetworksDevice_getName; # introduced=Q
+ ANeuralNetworksDevice_getVersion; # introduced=Q
+ ANeuralNetworksDevice_getFeatureLevel; # introduced=Q
ANeuralNetworksMemory_createFromFd;
ANeuralNetworksMemory_free;
ANeuralNetworksModel_create;
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp
index 5b4dc6b..d26f7cf 100644
--- a/runtime/test/TestValidation.cpp
+++ b/runtime/test/TestValidation.cpp
@@ -580,4 +580,69 @@
TEST_F(ValidationTestExecution, EventWait) {
EXPECT_EQ(ANeuralNetworksEvent_wait(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
}
+
+TEST(ValidationTestIntrospection, GetNumDevices) {
+ uint32_t numDevices = 0;
+ EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworks_getDeviceCount(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST(ValidationTestIntrospection, GetDevice) {
+ uint32_t numDevices = 0;
+ EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+
+ ANeuralNetworksDevice* device = nullptr;
+ for (uint32_t i = 0; i < numDevices; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_NE(device, nullptr);
+ }
+ EXPECT_EQ(ANeuralNetworks_getDevice(0, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworks_getDevice(numDevices, &device), ANEURALNETWORKS_BAD_DATA);
+}
+
+static void deviceStringCheck(
+ std::function<int(const ANeuralNetworksDevice*, const char**)> func) {
+ uint32_t numDevices = 0;
+ EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+
+ const char* buffer;
+ for (uint32_t i = 0; i < numDevices; i++) {
+ SCOPED_TRACE(i);
+ ANeuralNetworksDevice* device;
+ EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(func(device, &buffer), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(func(device, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+ }
+ EXPECT_EQ(func(nullptr, &buffer), ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(func(nullptr, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
+}
+
+TEST(ValidationTestIntrospection, DeviceGetName) {
+ deviceStringCheck(ANeuralNetworksDevice_getName);
+}
+
+TEST(ValidationTestIntrospection, DeviceGetVersion) {
+ deviceStringCheck(ANeuralNetworksDevice_getVersion);
+}
+
+TEST(ValidationTestIntrospection, DeviceGetFeatureLevel) {
+ uint32_t numDevices = 0;
+ EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+
+ int64_t featureLevel;
+ for (uint32_t i = 0; i < numDevices; i++) {
+ SCOPED_TRACE(i);
+ ANeuralNetworksDevice* device;
+ EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
+ ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, nullptr),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ }
+ EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(nullptr, &featureLevel),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+ EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(nullptr, nullptr),
+ ANEURALNETWORKS_UNEXPECTED_NULL);
+}
} // namespace