NN runtime: ANAPIC review follow-up

This topic makes the following three HAL interface changes:
* Removes @1.3::Operand.ExtraParams, because it was identical to
  @1.2::Operand.ExtraParams
* Changes the token type from int32_t to uint32_t for IDevice::allocate
  and for @1.3::Request.MemoryPool
* Renames OptionalTimePoint::nanoseconds to
  OptionalTimePoint::nanosecondsSinceEpoch

This CL also makes runtime changes in response to the interface changes
and fixes a minor comment formatting issue in NeuralNetworks.h (and its
source, tools/api/types.spec).
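
For illustration only, a minimal compile-only sketch of the runtime-facing
result; the V1_2 namespace below is a placeholder for the HIDL-generated
headers, and only the alias and the unsigned token mirror the actual change:

    #include <cstdint>

    // Stand-in for the HIDL-generated @1.2 types (placeholder, not the real header).
    namespace V1_2 {
    struct Operand {
        struct ExtraParams { /* HIDL safe_union: none | channelQuant | extension */ };
    };
    }  // namespace V1_2

    // @1.3::Operand no longer declares its own ExtraParams; the runtime now
    // refers to a single alias (added to HalInterfaces.h in this CL).
    using OperandExtraParams = V1_2::Operand::ExtraParams;

    // Device-memory tokens are unsigned, matching IDevice::allocate and
    // @1.3::Request.MemoryPool; a token of 0 still indicates "no token".
    struct MemorySketch {
        explicit MemorySketch(uint32_t token) : kToken(token) {}
        const uint32_t kToken;
    };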

Bug: 148617339
Test: mma
Test: CtsNNAPITestCases
Test: NeuralNetworksTest_static
Change-Id: I3ecc8f3f38e0551c309824c24ecd780a07cc1959
Merged-In: I3ecc8f3f38e0551c309824c24ecd780a07cc1959
(cherry picked from commit 131d838cd44536405d5ea394558625192b77901e)
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index 08a0ee3..496fc35 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -56,7 +56,7 @@
     OperandType getInputType(uint32_t index) const override;
     Shape getInputShape(uint32_t index) const override;
     const void* getInputBuffer(uint32_t index) const override;
-    const Operand::ExtraParams getInputExtraParams(uint32_t index) const override;
+    const OperandExtraParams getInputExtraParams(uint32_t index) const override;
 
     uint32_t getNumOutputs() const override;
     OperandType getOutputType(uint32_t index) const override;
@@ -114,7 +114,7 @@
     return getInputInfo(index)->buffer;
 }
 
-const Operand::ExtraParams OperationExecutionContext::getInputExtraParams(uint32_t index) const {
+const OperandExtraParams OperationExecutionContext::getInputExtraParams(uint32_t index) const {
     return getInputInfo(index)->extraParams;
 }
 
diff --git a/common/MetaModel.cpp b/common/MetaModel.cpp
index 2d30417..30d88a1 100644
--- a/common/MetaModel.cpp
+++ b/common/MetaModel.cpp
@@ -292,19 +292,19 @@
             return a.channelDim < b.channelDim;
         }
 
-        static bool compare(const Operand::ExtraParams& a, const Operand::ExtraParams& b) {
+        static bool compare(const OperandExtraParams& a, const OperandExtraParams& b) {
             if (a.getDiscriminator() != b.getDiscriminator()) {
                 return a.getDiscriminator() < b.getDiscriminator();
             }
 
             switch (a.getDiscriminator()) {
-                case Operand::ExtraParams::hidl_discriminator::channelQuant:
+                case OperandExtraParams::hidl_discriminator::channelQuant:
                     return compare(a.channelQuant(), b.channelQuant());
 
-                case Operand::ExtraParams::hidl_discriminator::extension:
+                case OperandExtraParams::hidl_discriminator::extension:
                     return a.extension() < b.extension();
 
-                case Operand::ExtraParams::hidl_discriminator::none:
+                case OperandExtraParams::hidl_discriminator::none:
                     return false;
 
                 default:
diff --git a/common/Utils.cpp b/common/Utils.cpp
index 1046baf..86983ea 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -105,7 +105,7 @@
     const uint64_t nanosecondsAtTimeout = nanosecondsSinceEpoch + duration;
 
     OptionalTimePoint otp;
-    otp.nanoseconds(nanosecondsAtTimeout);
+    otp.nanosecondsSinceEpoch(nanosecondsAtTimeout);
     return {ANEURALNETWORKS_NO_ERROR, otp};
 }
 
@@ -167,7 +167,7 @@
     uint32_t getNumInputs() const override;
     OperandType getInputType(uint32_t index) const override;
     Shape getInputShape(uint32_t index) const override;
-    const Operand::ExtraParams getInputExtraParams(uint32_t index) const override;
+    const OperandExtraParams getInputExtraParams(uint32_t index) const override;
 
     uint32_t getNumOutputs() const override;
     OperandType getOutputType(uint32_t index) const override;
@@ -222,7 +222,7 @@
             operand->extraParams};
 }
 
-const Operand::ExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const {
+const OperandExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const {
     return getInputOperand(index)->extraParams;
 }
 
@@ -2751,26 +2751,6 @@
     return static_cast<V1_0::OperandType>(operandType);
 }
 
-template <typename InExtraParams, typename OutExtraParams>
-OutExtraParams copyExtraParams(const InExtraParams& extraParams) {
-    OutExtraParams out;
-    switch (extraParams.getDiscriminator()) {
-        case InExtraParams::hidl_discriminator::none: {
-            out.none(extraParams.none());
-        } break;
-        case InExtraParams::hidl_discriminator::channelQuant: {
-            out.channelQuant({
-                    .scales = extraParams.channelQuant().scales,
-                    .channelDim = extraParams.channelQuant().channelDim,
-            });
-        } break;
-        case InExtraParams::hidl_discriminator::extension: {
-            out.extension(extraParams.extension());
-        } break;
-    }
-    return out;
-}
-
 bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime) {
     return true;
 }
@@ -2845,8 +2825,7 @@
             .zeroPoint = operand.zeroPoint,
             .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime),
             .location = operand.location,
-            .extraParams = copyExtraParams<V1_3::Operand::ExtraParams, V1_2::Operand::ExtraParams>(
-                    operand.extraParams)};
+            .extraParams = operand.extraParams};
 }
 
 V1_3::Operand convertToV1_3(const V1_0::Operand& operand) {
@@ -2867,8 +2846,7 @@
             .zeroPoint = operand.zeroPoint,
             .lifetime = convertToV1_3(operand.lifetime),
             .location = operand.location,
-            .extraParams = copyExtraParams<V1_2::Operand::ExtraParams, V1_3::Operand::ExtraParams>(
-                    operand.extraParams)};
+            .extraParams = operand.extraParams};
 }
 
 V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
diff --git a/common/ValidateHal.cpp b/common/ValidateHal.cpp
index 828c71c..bff08f1 100644
--- a/common/ValidateHal.cpp
+++ b/common/ValidateHal.cpp
@@ -113,14 +113,14 @@
         case OperandType::TENSOR_QUANT16_SYMM:
         case OperandType::TENSOR_BOOL8: {
             NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
-                         V1_3::Operand::ExtraParams::hidl_discriminator::none)
+                         OperandExtraParams::hidl_discriminator::none)
                     << "Operand " << index << ": Operand of type "
                     << getOperandTypeName(operand.type)
                     << " has incorrect extraParams: " << toString(operand.extraParams);
         } break;
         case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
             NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
-                         V1_3::Operand::ExtraParams::hidl_discriminator::channelQuant)
+                         OperandExtraParams::hidl_discriminator::channelQuant)
                     << "Operand " << index << ": Operand of type "
                     << getOperandTypeName(operand.type) << " without a Channel Quantization params";
             auto& channelQuant = operand.extraParams.channelQuant();
@@ -150,9 +150,9 @@
         default: {
             if (isExtensionOperandType(operand.type)) {
                 NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
-                                     V1_3::Operand::ExtraParams::hidl_discriminator::extension ||
+                                     OperandExtraParams::hidl_discriminator::extension ||
                              operand.extraParams.getDiscriminator() ==
-                                     V1_3::Operand::ExtraParams::hidl_discriminator::none)
+                                     OperandExtraParams::hidl_discriminator::none)
                         << "Operand " << index << ": Extension operand of type "
                         << getOperandTypeName(operand.type)
                         << " has incorrect extraParams: " << toString(operand.extraParams);
diff --git a/common/include/CpuExecutor.h b/common/include/CpuExecutor.h
index a099099..5ca8a64 100644
--- a/common/include/CpuExecutor.h
+++ b/common/include/CpuExecutor.h
@@ -70,7 +70,7 @@
     // always 0.
     uint32_t numberOfUsesLeft;
 
-    hal::Operand::ExtraParams extraParams;
+    hal::OperandExtraParams extraParams;
 
     Shape shape() const {
         return {
diff --git a/common/include/HalInterfaces.h b/common/include/HalInterfaces.h
index 2f20afc..a16d126 100644
--- a/common/include/HalInterfaces.h
+++ b/common/include/HalInterfaces.h
@@ -102,6 +102,7 @@
 using V1_3::Subgraph;
 using ExtensionNameAndPrefix = V1_2::Model::ExtensionNameAndPrefix;
 using ExtensionTypeEncoding = V1_2::Model::ExtensionTypeEncoding;
+using OperandExtraParams = V1_2::Operand::ExtraParams;
 
 using CacheToken =
         hardware::hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
diff --git a/common/include/OperationsUtils.h b/common/include/OperationsUtils.h
index 3a6dc94..a50b522 100644
--- a/common/include/OperationsUtils.h
+++ b/common/include/OperationsUtils.h
@@ -49,7 +49,7 @@
     std::vector<uint32_t> dimensions;
     float scale = 0.0f;
     int32_t offset = 0;
-    hal::Operand::ExtraParams extraParams;
+    hal::OperandExtraParams extraParams;
 };
 
 // Provides information available during graph creation to validate an operation.
@@ -78,7 +78,7 @@
     virtual uint32_t getNumInputs() const = 0;
     virtual hal::OperandType getInputType(uint32_t index) const = 0;
     virtual Shape getInputShape(uint32_t index) const = 0;
-    virtual const hal::Operand::ExtraParams getInputExtraParams(uint32_t index) const = 0;
+    virtual const hal::OperandExtraParams getInputExtraParams(uint32_t index) const = 0;
 
     virtual uint32_t getNumOutputs() const = 0;
     virtual hal::OperandType getOutputType(uint32_t index) const = 0;
@@ -94,7 +94,7 @@
     virtual hal::OperandType getInputType(uint32_t index) const = 0;
     virtual Shape getInputShape(uint32_t index) const = 0;
     virtual const void* getInputBuffer(uint32_t index) const = 0;
-    virtual const hal::Operand::ExtraParams getInputExtraParams(uint32_t index) const = 0;
+    virtual const hal::OperandExtraParams getInputExtraParams(uint32_t index) const = 0;
 
     virtual uint32_t getNumOutputs() const = 0;
     virtual hal::OperandType getOutputType(uint32_t index) const = 0;
diff --git a/runtime/ExecutionPlan.cpp b/runtime/ExecutionPlan.cpp
index 70a1de2..c655d0f 100644
--- a/runtime/ExecutionPlan.cpp
+++ b/runtime/ExecutionPlan.cpp
@@ -95,7 +95,7 @@
                            const Operand& fromOperand) {
     if (fromOperand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
         fromOperand.extraParams.getDiscriminator() ==
-                Operand::ExtraParams::hidl_discriminator::channelQuant) {
+                OperandExtraParams::hidl_discriminator::channelQuant) {
         auto& fromChannelQuant = fromOperand.extraParams.channelQuant();
         ANeuralNetworksSymmPerChannelQuantParams toChannelQuant = {
                 .channelDim = fromChannelQuant.channelDim,
@@ -105,12 +105,12 @@
         return model.setOperandSymmPerChannelQuantParams(toOperandIndex, toChannelQuant);
     } else if (isExtensionOperandType(fromOperand.type) &&
                fromOperand.extraParams.getDiscriminator() ==
-                       Operand::ExtraParams::hidl_discriminator::extension) {
+                       OperandExtraParams::hidl_discriminator::extension) {
         hidl_vec<uint8_t> extensionData = fromOperand.extraParams.extension();
         return model.setOperandExtensionData(toOperandIndex, extensionData.data(),
                                              extensionData.size());
     } else if (fromOperand.extraParams.getDiscriminator() !=
-                       Operand::ExtraParams::hidl_discriminator::none ||
+                       OperandExtraParams::hidl_discriminator::none ||
                fromOperand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
         LOG(ERROR) << "Type " << toString(fromOperand.type)
                    << " has an unexpected extraParams discriminator: "
diff --git a/runtime/Memory.cpp b/runtime/Memory.cpp
index f8f90f7..34c8c56 100644
--- a/runtime/Memory.cpp
+++ b/runtime/Memory.cpp
@@ -182,7 +182,7 @@
 Memory::Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
     : kHidlMemory(std::move(memory)), mValidator(std::move(validator)) {}
 
-Memory::Memory(sp<hal::IBuffer> buffer, int32_t token)
+Memory::Memory(sp<hal::IBuffer> buffer, uint32_t token)
     : kBuffer(std::move(buffer)), kToken(token) {}
 
 Memory::~Memory() {
@@ -577,7 +577,7 @@
 };
 
 std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<hal::IBuffer> buffer,
-                                                                           int32_t token) {
+                                                                           uint32_t token) {
     if (buffer == nullptr) {
         LOG(ERROR) << "nullptr IBuffer for device memory.";
         return {ANEURALNETWORKS_BAD_DATA, nullptr};
@@ -589,7 +589,7 @@
     return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer), token)};
 };
 
-MemoryFromDevice::MemoryFromDevice(sp<hal::IBuffer> buffer, int32_t token)
+MemoryFromDevice::MemoryFromDevice(sp<hal::IBuffer> buffer, uint32_t token)
     : Memory(std::move(buffer), token) {}
 
 }  // namespace nn
diff --git a/runtime/Memory.h b/runtime/Memory.h
index 386578f..a58dcae 100644
--- a/runtime/Memory.h
+++ b/runtime/Memory.h
@@ -193,13 +193,13 @@
    protected:
     Memory(hal::hidl_memory memory);
     Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator);
-    Memory(sp<hal::IBuffer> buffer, int32_t token);
+    Memory(sp<hal::IBuffer> buffer, uint32_t token);
 
     // The HIDL representation for this memory.  We will use one of the following values
     // when communicating with the drivers.
     const hal::hidl_memory kHidlMemory;
     const sp<hal::IBuffer> kBuffer;
-    const int32_t kToken = 0;
+    const uint32_t kToken = 0;
 
     std::unique_ptr<MemoryValidatorBase> mValidator;
 
@@ -311,10 +311,10 @@
     // On success, returns ANEURALNETWORKS_NO_ERROR and a memory object.
     // On error, returns the appropriate NNAPI error code and nullptr.
     static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(sp<hal::IBuffer> buffer,
-                                                                    int32_t token);
+                                                                    uint32_t token);
 
     // prefer using MemoryFromDevice::create
-    MemoryFromDevice(sp<hal::IBuffer> buffer, int32_t token);
+    MemoryFromDevice(sp<hal::IBuffer> buffer, uint32_t token);
 };
 
 using MemoryTracker = ObjectTracker<Memory>;
diff --git a/runtime/ModelBuilder.cpp b/runtime/ModelBuilder.cpp
index 28b78ae..e3603d6 100644
--- a/runtime/ModelBuilder.cpp
+++ b/runtime/ModelBuilder.cpp
@@ -98,7 +98,7 @@
             .zeroPoint = type.zeroPoint,
             .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
-            .extraParams = Operand::ExtraParams(),
+            .extraParams = OperandExtraParams(),
     });
     mHasOEMOperand |= isOemOperand;
     return ANEURALNETWORKS_NO_ERROR;
diff --git a/runtime/VersionedInterfaces.cpp b/runtime/VersionedInterfaces.cpp
index b4d081c..4178e23 100644
--- a/runtime/VersionedInterfaces.cpp
+++ b/runtime/VersionedInterfaces.cpp
@@ -1549,11 +1549,11 @@
     return kServiceName;
 }
 
-std::tuple<ErrorStatus, sp<IBuffer>, int32_t> VersionedIDevice::allocate(
+std::tuple<ErrorStatus, sp<IBuffer>, uint32_t> VersionedIDevice::allocate(
         const BufferDesc& desc,
         const std::vector<std::shared_ptr<VersionedIPreparedModel>>& versionedPreparedModels,
         const hidl_vec<BufferRole>& inputRoles, const hidl_vec<BufferRole>& outputRoles) const {
-    const auto kFailure = std::make_tuple<ErrorStatus, sp<IBuffer>, int32_t>(
+    const auto kFailure = std::make_tuple<ErrorStatus, sp<IBuffer>, uint32_t>(
             ErrorStatus::GENERAL_FAILURE, nullptr, 0);
 
     // version 1.3+ HAL
@@ -1566,11 +1566,11 @@
         std::tuple<ErrorStatus, sp<IBuffer>, int32_t> result;
         const Return<void> ret = recoverable<void, V1_3::IDevice>(
                 __FUNCTION__, [&](const sp<V1_3::IDevice>& device) {
-                    return device->allocate(
-                            desc, preparedModels, inputRoles, outputRoles,
-                            [&result](ErrorStatus error, const sp<IBuffer>& buffer, int32_t token) {
-                                result = {error, buffer, token};
-                            });
+                    return device->allocate(desc, preparedModels, inputRoles, outputRoles,
+                                            [&result](ErrorStatus error, const sp<IBuffer>& buffer,
+                                                      uint32_t token) {
+                                                result = {error, buffer, token};
+                                            });
                 });
         if (!ret.isOk()) {
             LOG(ERROR) << "allocate failure: " << ret.description();
diff --git a/runtime/VersionedInterfaces.h b/runtime/VersionedInterfaces.h
index 7711efa..203e919 100644
--- a/runtime/VersionedInterfaces.h
+++ b/runtime/VersionedInterfaces.h
@@ -382,7 +382,7 @@
      *       execution. If the buffer was unable to be allocated due to an error, the token must be
      *       0.
      */
-    std::tuple<hal::ErrorStatus, sp<hal::IBuffer>, int32_t> allocate(
+    std::tuple<hal::ErrorStatus, sp<hal::IBuffer>, uint32_t> allocate(
             const hal::BufferDesc& desc,
             const std::vector<std::shared_ptr<VersionedIPreparedModel>>& preparedModels,
             const hal::hidl_vec<hal::BufferRole>& inputRoles,
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 20c9021..b4851fc 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -3504,7 +3504,7 @@
      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
      *      walking through input in the ‘height’ dimension.
      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
-            groups.
+     *      groups.
      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
      *       {@link FuseCode} values. Specifies the activation to
      *       invoke on the result.
diff --git a/tools/api/types.spec b/tools/api/types.spec
index 55c48f6..ebd5288 100644
--- a/tools/api/types.spec
+++ b/tools/api/types.spec
@@ -4026,7 +4026,7 @@
      * * 8: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when
      *      walking through input in the ‘height’ dimension.
      * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the number of
-            groups.
+     *      groups.
      * * 10: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the
      *       {@link %{FusedActivationFunc}} values. Specifies the activation to
      *       invoke on the result.