Allow fine grain verbose logging control through system property.
- Existing DEBUG and INFO logging moves to the new mechanism; existing ERROR
and WARNING logging is unchanged.
- By default, all verbose logging is disabled.
- setprop debug.nn.vlog 1 enables all verbose logging.
- setprop debug.nn.vlog "tag1 tag2 ..." only enables the selected tags.
- Available tags:
all: same as 1.
model: enable logging with tag MODEL.
compilation: enable logging with tag COMPILATION.
execution: enable logging with tag EXECUTION.
cpuexe: enable logging with tag CPUEXE.
manager: enable logging with tag MANAGER.
driver: enable logging with tag DRIVER.
Bug: 63905942
Test: mm
Test: NeuralNetworksTests pass
Test: manually set the property to allow individual modules to log
verbosely
Test: tested with debug.nn.partition default -> 0 -> 1 to make
sure getProp still works fine
Change-Id: Iaa0ffa62176dabcdc35058748597df720fd6e47e
diff --git a/runtime/CompilationBuilder.cpp b/runtime/CompilationBuilder.cpp
index e51aafe..a1e1a3f 100644
--- a/runtime/CompilationBuilder.cpp
+++ b/runtime/CompilationBuilder.cpp
@@ -29,7 +29,7 @@
CompilationBuilder::CompilationBuilder(const ModelBuilder* model) :
mModel(model) {
- LOG(DEBUG) << "CompilationBuilder::CompilationBuilder";
+ VLOG(COMPILATION) << "CompilationBuilder::CompilationBuilder";
}
int CompilationBuilder::finish() {
diff --git a/runtime/ExecutionBuilder.cpp b/runtime/ExecutionBuilder.cpp
index d43f6a3..56dc723 100644
--- a/runtime/ExecutionBuilder.cpp
+++ b/runtime/ExecutionBuilder.cpp
@@ -98,7 +98,7 @@
mInputs(mModel->inputCount()),
mOutputs(mModel->outputCount()),
mMemories(mModel->getMemories()) {
- LOG(DEBUG) << "ExecutionBuilder::ExecutionBuilder";
+ VLOG(EXECUTION) << "ExecutionBuilder::ExecutionBuilder";
}
int ExecutionBuilder::setInput(uint32_t index, const ANeuralNetworksOperandType* type,
@@ -186,7 +186,7 @@
// Ensure that executionCallback->notify() is called.
static void cpuFallbackFull(const ExecutionBuilder* executionBuilder,
const sp<ExecutionCallback>& executionCallback) {
- LOG(DEBUG) << "cpuFallbackFull";
+ VLOG(EXECUTION) << "cpuFallbackFull";
StepExecutor executor(executionBuilder, executionBuilder->getModel(),
nullptr /* no IDevice, so CPU */,
nullptr /* no IPreparedModel */);
@@ -210,7 +210,7 @@
const ExecutionPlan* plan,
std::shared_ptr<ExecutionPlan::Controller> controller,
const sp<ExecutionCallback>& executionCallback) {
- LOG(DEBUG) << "cpuFallbackPartial";
+ VLOG(EXECUTION) << "cpuFallbackPartial";
std::shared_ptr<StepExecutor> executor;
int n = plan->fallback(controller, &executor);
if (n != ANEURALNETWORKS_NO_ERROR || executor->isCpu()) {
@@ -235,10 +235,10 @@
std::shared_ptr<ExecutionPlan::Controller> controller,
bool allowFallback,
const sp<ExecutionCallback>& executionCallback) {
- LOG(DEBUG) << "ExecutionBuilder::startCompute (from plan, iteratively)";
+ VLOG(EXECUTION) << "ExecutionBuilder::startCompute (from plan, iteratively)";
while (true) {
std::shared_ptr<StepExecutor> executor;
- LOG(DEBUG) << "looking for next StepExecutor";
+ VLOG(EXECUTION) << "looking for next StepExecutor";
int n = plan->next(controller, &executor);
if (n != ANEURALNETWORKS_NO_ERROR) {
if (allowFallback) {
@@ -358,10 +358,10 @@
const std::vector<std::shared_ptr<Device>>& devices = DeviceManager::get()->getDrivers();
for (const auto& device : devices) {
hidl_vec<bool> supports;
- LOG(DEBUG) << "Checking " << device->getName();
+ VLOG(EXECUTION) << "Checking " << device->getName();
device->getSupportedOperations(hidlModel, &supports);
if (std::find(supports.begin(), supports.end(), false) == supports.end()) {
- LOG(DEBUG) << "ExecutionBuilder::startCompute (without plan) on " << device->getName();
+ VLOG(EXECUTION) << "ExecutionBuilder::startCompute (without plan) on " << device->getName();
StepExecutor executor(this, mModel, device->getInterface(),
nullptr /* no IPreparedModel, so compile */);
executor.mapInputsAndOutputsTrivially();
@@ -372,7 +372,7 @@
#endif // DISABLE_PARTITIONED_EXECUTION
// Run on the CPU.
- LOG(DEBUG) << "ExecutionBuilder::startCompute (without plan) on CPU";
+ VLOG(EXECUTION) << "ExecutionBuilder::startCompute (without plan) on CPU";
StepExecutor executor(this, mModel,
nullptr /* no IDevice, so CPU */,
nullptr /* no IPreparedModel */);
@@ -556,7 +556,7 @@
// in the design document.
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
- LOG(DEBUG) << "Before mPreparedModel->execute() " << toString(request);
+ VLOG(EXECUTION) << "Before mPreparedModel->execute() " << toString(request);
// Execute.
// TODO: What happens to the Callback if the service dies abnormally
// -- won't that keep the Callback live forever, because the service
@@ -565,7 +565,7 @@
// it seems like this is a small memory leak, if the Callback stays
// alive forever.
if (mPreparedModel->execute(request, executionCallback) != ErrorStatus::NONE) {
- LOG(DEBUG) << "**Execute failed**";
+ VLOG(EXECUTION) << "**Execute failed**";
return ANEURALNETWORKS_OP_FAILED;
}
@@ -574,7 +574,7 @@
executionCallback->wait();
Return<ErrorStatus> executionStatus = executionCallback->getStatus();
if (!executionStatus.isOk() || executionStatus != ErrorStatus::NONE) {
- LOG(DEBUG) << "**Execute async failed**";
+ VLOG(EXECUTION) << "**Execute async failed**";
return ANEURALNETWORKS_OP_FAILED;
}
@@ -593,7 +593,7 @@
memcpy(info.buffer, data + loc.offset, loc.length);
}
}
- LOG(DEBUG) << "StepExecutor::startComputeOnDevice completed";
+ VLOG(EXECUTION) << "StepExecutor::startComputeOnDevice completed";
*synchronizationCallback = executionCallback;
return ANEURALNETWORKS_NO_ERROR;
diff --git a/runtime/ExecutionPlan.cpp b/runtime/ExecutionPlan.cpp
index 5318ccd..d2f74d5 100644
--- a/runtime/ExecutionPlan.cpp
+++ b/runtime/ExecutionPlan.cpp
@@ -282,7 +282,7 @@
}
int ExecutionStep::finishSubModel(const ModelBuilder* fromModel, bool* hasOutputOfUnknownSize) {
- LOG(DEBUG) << "ExecutionStep::finishSubModel, step " << mIndex;
+ VLOG(COMPILATION) << "ExecutionStep::finishSubModel, step " << mIndex;
auto convertModelInputsOrOutputs = [](
// IN: mModel{Inputs|Outputs}
@@ -328,8 +328,9 @@
for (uint32_t dimension : operand.dimensions) {
if (dimension == 0) {
*hasOutputOfUnknownSize = true;
- LOG(DEBUG) << "SubModelOutput (operand#" << subModelOutput.first << " of original graph)"
- << " has unknown size: " << toString(operand);
+ VLOG(COMPILATION) << "SubModelOutput (operand#" << subModelOutput.first
+ << " of original graph) has unknown size: "
+ << toString(operand);
break;
}
}
@@ -352,16 +353,16 @@
return ANEURALNETWORKS_NO_ERROR;
}
- LOG(DEBUG) << "ExecutionStep::finishSubModel, compilation";
+ VLOG(COMPILATION) << "ExecutionStep::finishSubModel, compilation";
return compile(mDevice, mSubModel.get(), &mPreparedSubModel);
}
void ExecutionStep::dump() const {
Model model;
mSubModel->setHidlModel(&model);
- LOG(DEBUG) << "ExecutionStep#" << mIndex
- << " for " << (mDevice == nullptr ? "CPU" : mDevice->getName())
- << " submodel: " << toString(model);
+ VLOG(COMPILATION) << "ExecutionStep#" << mIndex
+ << " for " << (mDevice == nullptr ? "CPU" : mDevice->getName())
+ << " submodel: " << toString(model);
}
int ExecutionPlan::CompoundBody::finish(const ModelBuilder* fromModel) {
@@ -369,12 +370,12 @@
for (const auto& step : mSteps) {
int n = step->finishSubModel(fromModel, &mHasSubModelOutputOfUnknownSize);
if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(DEBUG) << "ExecutionPlan::CompoundBody::finish -- finishSubModel failed";
+ VLOG(COMPILATION) << "ExecutionPlan::CompoundBody::finish -- finishSubModel failed";
return n;
}
}
if (mHasSubModelOutputOfUnknownSize) {
- LOG(DEBUG) << "ExecutionPlan::CompoundBody::finish -- mHasSubModelOutputOfUnknownSize";
+ VLOG(COMPILATION) << "ExecutionPlan::CompoundBody::finish -- mHasSubModelOutputOfUnknownSize";
return ANEURALNETWORKS_OP_FAILED;
}
@@ -388,7 +389,7 @@
return ANEURALNETWORKS_NO_ERROR;
}
- LOG(DEBUG) << "ExecutionPlan::SimpleBody::finish, compilation";
+ VLOG(COMPILATION) << "ExecutionPlan::SimpleBody::finish, compilation";
const int n = compile(mDevice, mModel, &mPreparedModel);
mSuccessfulFinish = (n == ANEURALNETWORKS_NO_ERROR);
return n;
@@ -418,7 +419,7 @@
const ExecutionBuilder* executionBuilder) const {
nnAssert((mState == EMPTY) == (mBody == nullptr));
if (mBody && !mBody->mSuccessfulFinish) {
- LOG(DEBUG) << "ExecutionPlan::makeController -- unsuccessful finish";
+ VLOG(EXECUTION) << "ExecutionPlan::makeController -- unsuccessful finish";
return std::shared_ptr<Controller>(nullptr);
}
@@ -474,8 +475,8 @@
std::shared_ptr<StepExecutor>* executor) const {
*executor = nullptr;
- LOG(DEBUG) << "ExecutionPlan::fallback(" << controller << ", " << executor
- << "): mNextStepIndex = " << controller->mNextStepIndex;
+ VLOG(EXECUTION) << "ExecutionPlan::fallback(" << controller << ", " << executor
+ << "): mNextStepIndex = " << controller->mNextStepIndex;
if (controller->mNextStepIndex == 0) {
// We haven't called next().
@@ -495,8 +496,8 @@
std::shared_ptr<StepExecutor>* executor) const {
*executor = nullptr;
- LOG(DEBUG) << "ExecutionPlan::next(" << controller << ", " << executor
- << "): mNextStepIndex = " << controller->mNextStepIndex;
+ VLOG(EXECUTION) << "ExecutionPlan::next(" << controller << ", " << executor
+ << "): mNextStepIndex = " << controller->mNextStepIndex;
if (controller->mNextStepIndex == Controller::kBadStepIndex) {
return ANEURALNETWORKS_OP_FAILED;
@@ -614,7 +615,7 @@
if (mBody) {
mBody->dump();
} else {
- LOG(DEBUG) << "EMPTY";
+ VLOG(COMPILATION) << "EMPTY";
}
}
@@ -644,7 +645,7 @@
}
void ExecutionPlan::SimpleBody::dump() const {
- LOG(DEBUG) << "SIMPLE for " << (mDevice == nullptr ? "CPU" : mDevice->getName());
+ VLOG(COMPILATION) << "SIMPLE for " << (mDevice == nullptr ? "CPU" : mDevice->getName());
}
void ExecutionPlan::CompoundBody::dump() const {
@@ -664,8 +665,8 @@
const size_t deviceCount = nonCpuDeviceCount + 1;
const size_t operationCount = mOperations.size();
- LOG(DEBUG) << "ModelBuilder::partitionTheWork: deviceCount = " << deviceCount
- << ", operationCount = " << operationCount;
+ VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: deviceCount = " << deviceCount
+ << ", operationCount = " << operationCount;
// If we only have the CPU, or if the graph has no operations, no
// need to try to partition.
@@ -686,11 +687,9 @@
std::not_equal_to<int>()) == bestDeviceForOperation.end()) {
const int bestDeviceIndex = bestDeviceForOperation[0];
const bool cpu = (size_t(bestDeviceIndex) == deviceCount - 1);
- if (WOULD_LOG(DEBUG)) {
- LOG(DEBUG) << "ModelBuilder::partitionTheWork: only one best device: "
- << bestDeviceIndex << " = "
- << (cpu ? "CPU" : devices[bestDeviceIndex]->getName());
- }
+ VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: only one best device: "
+ << bestDeviceIndex << " = "
+ << (cpu ? "CPU" : devices[bestDeviceIndex]->getName());
plan->becomeSingleStep(cpu ? nullptr : devices[bestDeviceIndex], this);
return plan->finish(this);
}
@@ -704,7 +703,8 @@
auto enqueueOnAppropriateDevice = [&](uint32_t operationIndex) {
int deviceIndex = bestDeviceForOperation[operationIndex];
perDeviceQueue[deviceIndex].push(operationIndex);
- LOG(DEBUG) << "enqueueOnAppropriateDevice " << operationIndex << " onto " << deviceIndex;
+ VLOG(COMPILATION) << "enqueueOnAppropriateDevice " << operationIndex << " onto "
+ << deviceIndex;
};
// This helper function finds a device that has operations ready to process.
@@ -726,7 +726,7 @@
while (true) {
// Find the device we'll do this step for.
int deviceIndex = findNextDeviceToProcess();
- LOG(DEBUG) << "findNextDeviceToProcess: " << deviceIndex;
+ VLOG(COMPILATION) << "findNextDeviceToProcess: " << deviceIndex;
if (deviceIndex < 0) {
break;
}
@@ -747,10 +747,11 @@
}
int n = plan->finish(this);
- if (WOULD_LOG(DEBUG)) {
+ if (VLOG_IS_ON(COMPILATION)) {
Model model;
setHidlModel(&model);
- LOG(DEBUG) << "ModelBuilder::partitionTheWork: original model: " << toString(model);
+ VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: original model: "
+ << toString(model);
plan->dump();
}
return n;
@@ -846,10 +847,10 @@
// TODO What if it is an OEM op?
(*bestDeviceForOperation)[operationIndex] =
bestChoice >= 0 ? bestChoice : static_cast<int>(nonCpuDeviceCount);
- LOG(VERBOSE) << "ModelBuilder::findBestDeviceForEachOperation("
- << toString(getOperation(operationIndex).type)
- << ") = "
- << (*bestDeviceForOperation)[operationIndex];
+ VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation("
+ << toString(getOperation(operationIndex).type)
+ << ") = "
+ << (*bestDeviceForOperation)[operationIndex];
}
return ANEURALNETWORKS_NO_ERROR;
}
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
index 42e0eed..25ccb21 100644
--- a/runtime/Manager.cpp
+++ b/runtime/Manager.cpp
@@ -44,8 +44,8 @@
if (status != ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getCapabilities returned the error " << toString(status);
}
- LOG(DEBUG) << "Capab " << capabilities.float32Performance.execTime;
- LOG(DEBUG) << "Capab " << capabilities.quantized8Performance.execTime;
+ VLOG(MANAGER) << "Capab " << capabilities.float32Performance.execTime;
+ VLOG(MANAGER) << "Capab " << capabilities.quantized8Performance.execTime;
mFloat32Performance = capabilities.float32Performance;
mQuantized8Performance = capabilities.quantized8Performance;
});
@@ -118,7 +118,7 @@
void DeviceManager::findAvailableDevices() {
using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hidl::manager::V1_0::IServiceManager;
- LOG(DEBUG) << "findAvailableDevices";
+ VLOG(MANAGER) << "findAvailableDevices";
sp<IServiceManager> manager = hardware::defaultServiceManager();
if (manager == nullptr) {
@@ -128,7 +128,7 @@
manager->listByInterface(IDevice::descriptor, [this](const hidl_vec<hidl_string>& names) {
for (const auto& name : names) {
- LOG(DEBUG) << "Found interface " << name.c_str();
+ VLOG(MANAGER) << "Found interface " << name.c_str();
sp<IDevice> device = IDevice::getService(name);
if (device == nullptr) {
LOG(ERROR) << "Got a null IDEVICE for " << name.c_str();
@@ -140,7 +140,7 @@
}
DeviceManager::DeviceManager() {
- LOG(VERBOSE) << "DeviceManager::DeviceManager";
+ VLOG(MANAGER) << "DeviceManager::DeviceManager";
findAvailableDevices();
#ifdef NN_DEBUGGABLE
mPartitioning = getProp("debug.nn.partition", kPartitioningDefault);
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index f9785c2..979ca7f 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -235,6 +235,7 @@
}
int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
+ initVLogMask();
if (!model) {
LOG(ERROR) << "ANeuralNetworksModel_create passed a nullptr";
return ANEURALNETWORKS_UNEXPECTED_NULL;
diff --git a/runtime/test/TestGenerated.cpp b/runtime/test/TestGenerated.cpp
index 4c794aa..4602699 100644
--- a/runtime/test/TestGenerated.cpp
+++ b/runtime/test/TestGenerated.cpp
@@ -158,10 +158,7 @@
class GeneratedTests : public ::testing::Test {
protected:
- virtual void SetUp() {
- // For detailed logs, uncomment this line:
- // SetMinimumLogSeverity(android::base::VERBOSE);
- }
+ virtual void SetUp() {}
};
// Testcases generated from runtime/test/specs/*.mod.py
diff --git a/runtime/test/TestMain.cpp b/runtime/test/TestMain.cpp
index 8445ac7..29d8076 100644
--- a/runtime/test/TestMain.cpp
+++ b/runtime/test/TestMain.cpp
@@ -16,7 +16,6 @@
#include "Manager.h"
#include "NeuralNetworksWrapper.h"
-//#include "SampleDriver.h"
#include "Utils.h"
#include <gtest/gtest.h>
@@ -26,6 +25,7 @@
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
+ android::nn::initVLogMask();
// Test with the installed drivers.
int n1 = RUN_ALL_TESTS();
diff --git a/runtime/test/TestMemory.cpp b/runtime/test/TestMemory.cpp
index 30785db..74931ca 100644
--- a/runtime/test/TestMemory.cpp
+++ b/runtime/test/TestMemory.cpp
@@ -32,10 +32,7 @@
// Tests the various ways to pass weights and input/output data.
class MemoryTest : public ::testing::Test {
protected:
- virtual void SetUp() {
- // For detailed logs, uncomment this line:
- // SetMinimumLogSeverity(android::base::VERBOSE);
- }
+ virtual void SetUp() {}
const Matrix3x4 matrix1 = {{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}, {9.f, 10.f, 11.f, 12.f}};
const Matrix3x4 matrix2 = {{100.f, 200.f, 300.f, 400.f},
diff --git a/runtime/test/TestTrivialModel.cpp b/runtime/test/TestTrivialModel.cpp
index 14935e3..1eda104 100644
--- a/runtime/test/TestTrivialModel.cpp
+++ b/runtime/test/TestTrivialModel.cpp
@@ -28,10 +28,7 @@
class TrivialTest : public ::testing::Test {
protected:
- virtual void SetUp() {
- // For detailed logs, uncomment this line:
- // SetMinimumLogSeverity(android::base::VERBOSE);
- }
+ virtual void SetUp() {}
const Matrix3x4 matrix1 = {{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}, {9.f, 10.f, 11.f, 12.f}};
const Matrix3x4 matrix2 = {{100.f, 200.f, 300.f, 400.f},
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp
index 4ac8fc1..d7056c9 100644
--- a/runtime/test/TestValidation.cpp
+++ b/runtime/test/TestValidation.cpp
@@ -26,10 +26,7 @@
namespace {
class ValidationTest : public ::testing::Test {
protected:
- virtual void SetUp() {
- // For detailed logs, uncomment this line:
- // SetMinimumLogSeverity(android::base::VERBOSE);
- }
+ virtual void SetUp() {}
};
class ValidationTestModel : public ValidationTest {